mirror of https://github.com/etcd-io/dbtester.git

commit 74de567743
Merge pull request #308 from gyuho/update

*: update config formats, etc
@@ -18,6 +18,8 @@ import (
 	"fmt"
 	"os/exec"
 	"strings"
+
+	"github.com/coreos/dbtester/dbtesterpb"
 )
 
 // startCetcd starts cetcd. This assumes that etcd is already started.
@@ -32,11 +34,19 @@ func startCetcd(fs *flags, t *transporterServer) error {
 		clientURLs[i] = fmt.Sprintf("http://%s:2379", u)
 	}
 
-	flags := []string{
+	var flags []string
+	switch t.req.DatabaseID {
+	case dbtesterpb.DatabaseID_cetcd__beta:
+		flags = []string{
 			// "-consuladdr", "0.0.0.0:8500",
-			"-consuladdr", fmt.Sprintf("%s:8500", peerIPs[t.req.IpIndex]),
-			"-etcd", clientURLs[t.req.IpIndex], // etcd endpoint
+			"-consuladdr", fmt.Sprintf("%s:8500", peerIPs[t.req.IPIndex]),
+			"-etcd", clientURLs[t.req.IPIndex], // etcd endpoint
+		}
+
+	default:
+		return fmt.Errorf("database ID %q is not supported", t.req.DatabaseID)
 	}
+
 	flagString := strings.Join(flags, " ")
 
 	cmd := exec.Command(fs.cetcdExec, flags...)
@@ -51,7 +61,7 @@ func startCetcd(fs *flags, t *transporterServer) error {
 	t.proxyCmd = cmd
 	t.proxyCmdWait = make(chan struct{})
 	t.proxyPid = int64(cmd.Process.Pid)
-	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 
+	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 	return nil
 }

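The change above replaces a fixed flag slice with a dispatch on the request's database ID, returning an error for IDs the agent cannot start. Below is a minimal, self-contained sketch of that pattern; the databaseID type, buildProxyFlags helper, and simplified inputs are illustrative assumptions, not the generated dbtesterpb types.

    package main

    import "fmt"

    // databaseID stands in for the generated dbtesterpb.DatabaseID enum.
    type databaseID string

    const databaseIDCetcdBeta databaseID = "cetcd__beta"

    // buildProxyFlags mirrors the new startCetcd flow: pick flags per database ID,
    // reject anything the agent does not know how to start.
    func buildProxyFlags(id databaseID, ipIndex int, peerIPs, clientURLs []string) ([]string, error) {
        var flags []string
        switch id {
        case databaseIDCetcdBeta:
            flags = []string{
                "-consuladdr", fmt.Sprintf("%s:8500", peerIPs[ipIndex]),
                "-etcd", clientURLs[ipIndex], // etcd endpoint
            }
        default:
            return nil, fmt.Errorf("database ID %q is not supported", id)
        }
        return flags, nil
    }

    func main() {
        flags, err := buildProxyFlags(databaseIDCetcdBeta, 0,
            []string{"10.0.0.1"}, []string{"http://10.0.0.1:2379"})
        fmt.Println(flags, err)
    }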
@@ -19,6 +19,8 @@ import (
 	"os"
 	"os/exec"
 	"strings"
+
+	"github.com/coreos/dbtester/dbtesterpb"
 )
 
 // startConsul starts Consul.
@@ -34,27 +36,55 @@ func startConsul(fs *flags, t *transporterServer) error {
 	peerIPs := strings.Split(t.req.PeerIPsString, "___")
 
 	var flags []string
-	switch t.req.IpIndex {
+	switch t.req.DatabaseID {
+	case dbtesterpb.DatabaseID_consul__v0_7_5:
+		switch t.req.IPIndex {
 		case 0: // leader
 			flags = []string{
 				"agent",
 				"-server",
 				"-data-dir", fs.consulDataDir,
-				"-bind", peerIPs[t.req.IpIndex],
-				"-client", peerIPs[t.req.IpIndex],
-				"-bootstrap-expect", "3",
+				"-bind", peerIPs[t.req.IPIndex],
+				"-client", peerIPs[t.req.IPIndex],
+				"-bootstrap-expect", fmt.Sprintf("%d", len(peerIPs)),
 			}
 
 		default:
 			flags = []string{
 				"agent",
 				"-server",
 				"-data-dir", fs.consulDataDir,
-				"-bind", peerIPs[t.req.IpIndex],
-				"-client", peerIPs[t.req.IpIndex],
+				"-bind", peerIPs[t.req.IPIndex],
+				"-client", peerIPs[t.req.IPIndex],
 				"-join", peerIPs[0],
 			}
 		}
 
+	case dbtesterpb.DatabaseID_consul__v0_8_0:
+		switch t.req.IPIndex {
+		case 0: // leader
+			flags = []string{
+				"agent",
+				"-server",
+				"-data-dir", fs.consulDataDir,
+				"-bind", peerIPs[t.req.IPIndex],
+				"-client", peerIPs[t.req.IPIndex],
+				"-bootstrap-expect", fmt.Sprintf("%d", len(peerIPs)),
+			}
+		default:
+			flags = []string{
+				"agent",
+				"-server",
+				"-data-dir", fs.consulDataDir,
+				"-bind", peerIPs[t.req.IPIndex],
+				"-client", peerIPs[t.req.IPIndex],
+				"-join", peerIPs[0],
+			}
+		}
+
+	default:
+		return fmt.Errorf("database ID %q is not supported", t.req.DatabaseID)
+	}
+
 	flagString := strings.Join(flags, " ")
 
 	cmd := exec.Command(fs.consulExec, flags...)
@@ -69,7 +99,7 @@ func startConsul(fs *flags, t *transporterServer) error {
 	t.cmd = cmd
 	t.cmdWait = make(chan struct{})
 	t.pid = int64(cmd.Process.Pid)
-	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 
+	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 	return nil
 }

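One behavioral change worth noting above: the Consul leader's -bootstrap-expect value is no longer hard-coded to "3" but derived from the peer list, so the expected server count follows the test topology. A small sketch of that flag assembly under assumed, simplified inputs (consulAgentFlags is a hypothetical helper, not part of the repository):

    package main

    import (
        "fmt"
        "strings"
    )

    // consulAgentFlags mirrors the updated startConsul logic: node 0 bootstraps
    // with an expectation equal to the cluster size, the rest join node 0.
    func consulAgentFlags(dataDir string, ipIndex int, peerIPs []string) []string {
        common := []string{
            "agent", "-server", "-data-dir", dataDir,
            "-bind", peerIPs[ipIndex], "-client", peerIPs[ipIndex],
        }
        if ipIndex == 0 { // leader
            return append(common, "-bootstrap-expect", fmt.Sprintf("%d", len(peerIPs)))
        }
        return append(common, "-join", peerIPs[0])
    }

    func main() {
        peers := []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"}
        for i := range peers {
            fmt.Println(strings.Join(consulAgentFlags("/tmp/consul-data", i, peers), " "))
        }
    }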
@@ -19,6 +19,8 @@ import (
 	"os"
 	"os/exec"
 	"strings"
+
+	"github.com/coreos/dbtester/dbtesterpb"
 )
 
 // startEtcd starts etcd v2 and v3.
@@ -44,28 +46,87 @@ func startEtcd(fs *flags, t *transporterServer) error {
 		members[i] = fmt.Sprintf("%s=%s", names[i], peerURLs[i])
 	}
 
-	qv := t.req.Etcdv3Config.QuotaSizeBytes
-	if qv > 8000000000 {
-		plog.Warningf("maximum etcd quota is 8GB (got %d)... resetting to 8GB...", qv)
-		qv = 8000000000
-	}
-	flags := []string{
-		"--name", names[t.req.IpIndex],
+	var flags []string
+	switch t.req.DatabaseID {
+	case dbtesterpb.DatabaseID_etcd__v2_3:
+		flags = []string{
+			"--name", names[t.req.IPIndex],
 			"--data-dir", fs.etcdDataDir,
-			"--quota-backend-bytes", fmt.Sprintf("%d", qv),
 
-			"--snapshot-count", fmt.Sprintf("%d", t.req.Etcdv3Config.SnapCount),
+			"--snapshot-count", fmt.Sprintf("%d", t.req.Flag_Etcd_Tip.SnapshotCount),
 
-			"--listen-client-urls", clientURLs[t.req.IpIndex],
-			"--advertise-client-urls", clientURLs[t.req.IpIndex],
+			"--listen-client-urls", clientURLs[t.req.IPIndex],
+			"--advertise-client-urls", clientURLs[t.req.IPIndex],
 
-			"--listen-peer-urls", peerURLs[t.req.IpIndex],
-			"--initial-advertise-peer-urls", peerURLs[t.req.IpIndex],
+			"--listen-peer-urls", peerURLs[t.req.IPIndex],
+			"--initial-advertise-peer-urls", peerURLs[t.req.IPIndex],
 
 			"--initial-cluster-token", "dbtester-etcd-token",
 			"--initial-cluster", strings.Join(members, ","),
 			"--initial-cluster-state", "new",
 		}
 
+	case dbtesterpb.DatabaseID_etcd__v3_1:
+		flags = []string{
+			"--name", names[t.req.IPIndex],
+			"--data-dir", fs.etcdDataDir,
+			"--quota-backend-bytes", fmt.Sprintf("%d", t.req.Flag_Etcd_Tip.QuotaSizeBytes),
+
+			"--snapshot-count", fmt.Sprintf("%d", t.req.Flag_Etcd_Tip.SnapshotCount),
+
+			"--listen-client-urls", clientURLs[t.req.IPIndex],
+			"--advertise-client-urls", clientURLs[t.req.IPIndex],
+
+			"--listen-peer-urls", peerURLs[t.req.IPIndex],
+			"--initial-advertise-peer-urls", peerURLs[t.req.IPIndex],
+
+			"--initial-cluster-token", "dbtester-etcd-token",
+			"--initial-cluster", strings.Join(members, ","),
+			"--initial-cluster-state", "new",
+		}
+
+	case dbtesterpb.DatabaseID_etcd__v3_2:
+		flags = []string{
+			"--name", names[t.req.IPIndex],
+			"--data-dir", fs.etcdDataDir,
+			"--quota-backend-bytes", fmt.Sprintf("%d", t.req.Flag_Etcd_Tip.QuotaSizeBytes),
+
+			"--snapshot-count", fmt.Sprintf("%d", t.req.Flag_Etcd_Tip.SnapshotCount),
+
+			"--listen-client-urls", clientURLs[t.req.IPIndex],
+			"--advertise-client-urls", clientURLs[t.req.IPIndex],
+
+			"--listen-peer-urls", peerURLs[t.req.IPIndex],
+			"--initial-advertise-peer-urls", peerURLs[t.req.IPIndex],
+
+			"--initial-cluster-token", "dbtester-etcd-token",
+			"--initial-cluster", strings.Join(members, ","),
+			"--initial-cluster-state", "new",
+		}
+
+	case dbtesterpb.DatabaseID_etcd__tip:
+		flags = []string{
+			"--name", names[t.req.IPIndex],
+			"--data-dir", fs.etcdDataDir,
+			"--quota-backend-bytes", fmt.Sprintf("%d", t.req.Flag_Etcd_Tip.QuotaSizeBytes),
+
+			"--snapshot-count", fmt.Sprintf("%d", t.req.Flag_Etcd_Tip.SnapshotCount),
+
+			"--listen-client-urls", clientURLs[t.req.IPIndex],
+			"--advertise-client-urls", clientURLs[t.req.IPIndex],
+
+			"--listen-peer-urls", peerURLs[t.req.IPIndex],
+			"--initial-advertise-peer-urls", peerURLs[t.req.IPIndex],
+
+			"--initial-cluster-token", "dbtester-etcd-token",
+			"--initial-cluster", strings.Join(members, ","),
+			"--initial-cluster-state", "new",
+		}
+
+	default:
+		return fmt.Errorf("database ID %q is not supported", t.req.DatabaseID)
+	}
+
 	flagString := strings.Join(flags, " ")
 
 	cmd := exec.Command(fs.etcdExec, flags...)
@@ -80,7 +141,7 @@ func startEtcd(fs *flags, t *transporterServer) error {
 	t.cmd = cmd
 	t.cmdWait = make(chan struct{})
 	t.pid = int64(cmd.Process.Pid)
-	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 
+	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 	return nil
 }

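Each etcd case above emits essentially the same flag set; what varies per release is whether a backend quota is passed and which request fields supply the quota and snapshot count (all read from Flag_Etcd_Tip at this commit). A sketch of building such a flag set from a plain options struct — the etcdOptions type and its field names are assumptions for illustration, not the generated protobuf fields:

    package main

    import (
        "fmt"
        "strings"
    )

    // etcdOptions captures the per-release knobs the agent reads from the request.
    type etcdOptions struct {
        Name           string
        DataDir        string
        QuotaSizeBytes int64 // 0 means "do not pass --quota-backend-bytes" (e.g. etcd v2)
        SnapshotCount  int64
        ClientURL      string
        PeerURL        string
        InitialCluster string
    }

    // etcdFlags renders the options into the command-line form startEtcd joins and logs.
    func etcdFlags(o etcdOptions) []string {
        flags := []string{"--name", o.Name, "--data-dir", o.DataDir}
        if o.QuotaSizeBytes > 0 {
            flags = append(flags, "--quota-backend-bytes", fmt.Sprintf("%d", o.QuotaSizeBytes))
        }
        flags = append(flags,
            "--snapshot-count", fmt.Sprintf("%d", o.SnapshotCount),
            "--listen-client-urls", o.ClientURL,
            "--advertise-client-urls", o.ClientURL,
            "--listen-peer-urls", o.PeerURL,
            "--initial-advertise-peer-urls", o.PeerURL,
            "--initial-cluster-token", "dbtester-etcd-token",
            "--initial-cluster", o.InitialCluster,
            "--initial-cluster-state", "new",
        )
        return flags
    }

    func main() {
        o := etcdOptions{
            Name: "agent-0", DataDir: "/tmp/etcd-data",
            QuotaSizeBytes: 8000000000, SnapshotCount: 100000,
            ClientURL: "http://10.0.0.1:2379", PeerURL: "http://10.0.0.1:2380",
            InitialCluster: "agent-0=http://10.0.0.1:2380",
        }
        fmt.Println(strings.Join(etcdFlags(o), " "))
    }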
@@ -18,6 +18,8 @@ import (
 	"fmt"
 	"os/exec"
 	"strings"
+
+	"github.com/coreos/dbtester/dbtesterpb"
 )
 
 // startZetcd starts zetcd. This assumes that etcd is already started.
@@ -32,11 +34,19 @@ func startZetcd(fs *flags, t *transporterServer) error {
 		clientURLs[i] = fmt.Sprintf("http://%s:2379", u)
 	}
 
-	flags := []string{
+	var flags []string
+	switch t.req.DatabaseID {
+	case dbtesterpb.DatabaseID_zetcd__beta:
+		flags = []string{
 			// "-zkaddr", "0.0.0.0:2181",
-			"-zkaddr", fmt.Sprintf("%s:2181", peerIPs[t.req.IpIndex]),
-			"-endpoint", clientURLs[t.req.IpIndex],
+			"-zkaddr", fmt.Sprintf("%s:2181", peerIPs[t.req.IPIndex]),
+			"-endpoint", clientURLs[t.req.IPIndex],
+		}
+
+	default:
+		return fmt.Errorf("database ID %q is not supported", t.req.DatabaseID)
 	}
+
 	flagString := strings.Join(flags, " ")
 
 	cmd := exec.Command(fs.zetcdExec, flags...)
@@ -51,7 +61,7 @@ func startZetcd(fs *flags, t *transporterServer) error {
 	t.proxyCmd = cmd
 	t.proxyCmdWait = make(chan struct{})
 	t.proxyPid = int64(cmd.Process.Pid)
-	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 
+	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 	return nil
 }

@@ -22,6 +22,8 @@ import (
 	"os/exec"
 	"path/filepath"
 	"strings"
+
+	"github.com/coreos/dbtester/dbtesterpb"
 )
 
 var (
@@ -64,6 +66,23 @@ func init() {
 	}
 }
 
+// Java class paths for Zookeeper.
+// '-cp' is for 'class search path of directories and zip/jar files'.
+// See https://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html for more.
+const (
+	// JavaClassPathZookeeperr349 is the Java class paths of Zookeeper r3.4.9.
+	// CHANGE THIS FOR DIFFERENT ZOOKEEPER RELEASE!
+	// THIS IS ONLY VALID FOR Zookeeper r3.4.9.
+	// Search correct paths with 'find ./zookeeper/lib | sort'.
+	JavaClassPathZookeeperr349 = `-cp zookeeper-3.4.9.jar:lib/slf4j-api-1.6.1.jar:lib/slf4j-log4j12-1.6.1.jar:lib/log4j-1.2.16.jar:conf org.apache.zookeeper.server.quorum.QuorumPeerMain`
+
+	// JavaClassPathZookeeperr352alpha is the Java class paths of Zookeeper r3.5.2-alpha.
+	// CHANGE THIS FOR DIFFERENT ZOOKEEPER RELEASE!
+	// THIS IS ONLY VALID FOR Zookeeper r3.5.2-alpha.
+	// Search correct paths with 'find ./zookeeper/lib | sort'.
+	JavaClassPathZookeeperr352alpha = `-cp zookeeper-3.5.2-alpha.jar:lib/slf4j-api-1.7.5.jar:lib/slf4j-log4j12-1.7.5.jar:lib/log4j-1.2.17.jar:conf org.apache.zookeeper.server.quorum.QuorumPeerMain`
+)
+
 // startZookeeper starts Zookeeper.
 func startZookeeper(fs *flags, t *transporterServer) error {
 	if !exist(fs.javaExec) {
@@ -84,44 +103,104 @@ func startZookeeper(fs *flags, t *transporterServer) error {
 	}
 
 	ipath := filepath.Join(fs.zkDataDir, "myid")
-	plog.Infof("writing Zookeeper myid file %d to %s", t.req.ZookeeperConfig.MyID, ipath)
-	if err := toFile(fmt.Sprintf("%d", t.req.ZookeeperConfig.MyID), ipath); err != nil {
+	switch t.req.DatabaseID {
+	case dbtesterpb.DatabaseID_zookeeper__r3_4_9:
+		if t.req.Flag_Zookeeper_R3_4_9 == nil {
+			return fmt.Errorf("request 'Flag_Zookeeper_R3_4_9' is nil")
+		}
+		plog.Infof("writing Zookeeper myid file %d to %s", t.req.Flag_Zookeeper_R3_4_9.MyID, ipath)
+		if err := toFile(fmt.Sprintf("%d", t.req.Flag_Zookeeper_R3_4_9.MyID), ipath); err != nil {
 			return err
 		}
+
+	case dbtesterpb.DatabaseID_zookeeper__r3_5_2_alpha:
+		if t.req.Flag_Zookeeper_R3_5_2Alpha == nil {
+			return fmt.Errorf("request 'Flag_Zookeeper_R3_5_2Alpha' is nil")
+		}
+		plog.Infof("writing Zookeeper myid file %d to %s", t.req.Flag_Zookeeper_R3_5_2Alpha.MyID, ipath)
+		if err := toFile(fmt.Sprintf("%d", t.req.Flag_Zookeeper_R3_5_2Alpha.MyID), ipath); err != nil {
+			return err
+		}
+
+	default:
+		return fmt.Errorf("database ID %q is not supported", t.req.DatabaseID)
+	}
 
+	var cfg ZookeeperConfig
 	peerIPs := strings.Split(t.req.PeerIPsString, "___")
 	peers := []ZookeeperPeer{}
 	for i := range peerIPs {
 		peers = append(peers, ZookeeperPeer{MyID: i + 1, IP: peerIPs[i]})
 	}
-	cfg := ZookeeperConfig{
-		TickTime: t.req.ZookeeperConfig.TickTime,
-		DataDir: fs.zkDataDir,
-		ClientPort: t.req.ZookeeperConfig.ClientPort,
-		InitLimit: t.req.ZookeeperConfig.InitLimit,
-		SyncLimit: t.req.ZookeeperConfig.SyncLimit,
-		MaxClientConnections: t.req.ZookeeperConfig.MaxClientConnections,
-		Peers: peers,
-		SnapCount: t.req.ZookeeperConfig.SnapCount,
+	switch t.req.DatabaseID {
+	case dbtesterpb.DatabaseID_zookeeper__r3_4_9:
+		cfg = ZookeeperConfig{
+			TickTime: t.req.Flag_Zookeeper_R3_4_9.TickTime,
+			DataDir: fs.zkDataDir,
+			ClientPort: t.req.Flag_Zookeeper_R3_4_9.ClientPort,
+			InitLimit: t.req.Flag_Zookeeper_R3_4_9.InitLimit,
+			SyncLimit: t.req.Flag_Zookeeper_R3_4_9.SyncLimit,
+			MaxClientConnections: t.req.Flag_Zookeeper_R3_4_9.MaxClientConnections,
+			Peers: peers,
+			SnapCount: t.req.Flag_Zookeeper_R3_4_9.SnapCount,
+		}
+
+	case dbtesterpb.DatabaseID_zookeeper__r3_5_2_alpha:
+		cfg = ZookeeperConfig{
+			TickTime: t.req.Flag_Zookeeper_R3_5_2Alpha.TickTime,
+			DataDir: fs.zkDataDir,
+			ClientPort: t.req.Flag_Zookeeper_R3_5_2Alpha.ClientPort,
+			InitLimit: t.req.Flag_Zookeeper_R3_5_2Alpha.InitLimit,
+			SyncLimit: t.req.Flag_Zookeeper_R3_5_2Alpha.SyncLimit,
+			MaxClientConnections: t.req.Flag_Zookeeper_R3_5_2Alpha.MaxClientConnections,
+			Peers: peers,
+			SnapCount: t.req.Flag_Zookeeper_R3_5_2Alpha.SnapCount,
+		}
+
+	default:
+		return fmt.Errorf("database ID %q is not supported", t.req.DatabaseID)
 	}
 	tpl := template.Must(template.New("zkTemplate").Parse(zkTemplate))
 	buf := new(bytes.Buffer)
 	if err := tpl.Execute(buf, cfg); err != nil {
 		return err
 	}
-	zc := buf.String()
-	plog.Infof("writing Zookeeper config file %q (config %q)", fs.zkConfig, zc)
-	if err := toFile(zc, fs.zkConfig); err != nil {
+	zctxt := buf.String()
+	plog.Infof("writing Zookeeper config file %q (config %q)", fs.zkConfig, zctxt)
+	if err := toFile(zctxt, fs.zkConfig); err != nil {
 		return err
 	}
 
-	// CHANGE THIS FOR DIFFERENT ZOOKEEPER RELEASE
-	// https://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
-	// THIS IS ONLY VALID FOR Zookeeper r3.4.9
-	flagString := `-cp zookeeper-3.4.9.jar:lib/slf4j-api-1.6.1.jar:lib/slf4j-log4j12-1.6.1.jar:lib/log4j-1.2.16.jar:conf org.apache.zookeeper.server.quorum.QuorumPeerMain`
-	args := []string{shell, "-c", fs.javaExec + " " + flagString + " " + fs.zkConfig}
+	args := []string{shell}
+	var flagString string
+	switch t.req.DatabaseID {
+	case dbtesterpb.DatabaseID_zookeeper__r3_4_9:
+		flagString = JavaClassPathZookeeperr349
+		// -Djute.maxbuffer=33554432 -Xms50G -Xmx50G
+		if t.req.Flag_Zookeeper_R3_4_9.JavaDJuteMaxBuffer != 0 {
+			args = append(args, fmt.Sprintf("-Djute.maxbuffer=%d", t.req.Flag_Zookeeper_R3_4_9.JavaDJuteMaxBuffer))
+		}
+		if t.req.Flag_Zookeeper_R3_4_9.JavaDJuteMaxBuffer != 0 {
+			args = append(args, fmt.Sprintf("-Xms%s", t.req.Flag_Zookeeper_R3_4_9.JavaXms))
+		}
+		if t.req.Flag_Zookeeper_R3_4_9.JavaDJuteMaxBuffer != 0 {
+			args = append(args, fmt.Sprintf("-Xmx%s", t.req.Flag_Zookeeper_R3_4_9.JavaXmx))
+		}
+
+	case dbtesterpb.DatabaseID_zookeeper__r3_5_2_alpha:
+		flagString = JavaClassPathZookeeperr352alpha
+		// -Djute.maxbuffer=33554432 -Xms50G -Xmx50G
+		if t.req.Flag_Zookeeper_R3_5_2Alpha.JavaDJuteMaxBuffer != 0 {
+			args = append(args, fmt.Sprintf("-Djute.maxbuffer=%d", t.req.Flag_Zookeeper_R3_5_2Alpha.JavaDJuteMaxBuffer))
+		}
+		if t.req.Flag_Zookeeper_R3_5_2Alpha.JavaDJuteMaxBuffer != 0 {
+			args = append(args, fmt.Sprintf("-Xms%s", t.req.Flag_Zookeeper_R3_5_2Alpha.JavaXms))
+		}
+		if t.req.Flag_Zookeeper_R3_5_2Alpha.JavaDJuteMaxBuffer != 0 {
+			args = append(args, fmt.Sprintf("-Xmx%s", t.req.Flag_Zookeeper_R3_5_2Alpha.JavaXmx))
+		}
+
+	default:
+		return fmt.Errorf("database ID %q is not supported", t.req.DatabaseID)
+	}
+
+	args = append(args, "-c", fs.javaExec+" "+flagString+" "+fs.zkConfig)
 
 	cmd := exec.Command(args[0], args[1:]...)
 	cmd.Stdout = t.databaseLogFile
@@ -135,7 +214,7 @@ func startZookeeper(fs *flags, t *transporterServer) error {
 	t.cmd = cmd
 	t.cmdWait = make(chan struct{})
 	t.pid = int64(cmd.Process.Pid)
-	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 
+	plog.Infof("started database %q (PID: %d)", cs, t.pid)
 	return nil
 }

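The ZooKeeper path now selects a per-release Java class path, validates the matching request flags, writes the myid file, and renders zoo.cfg from a template before invoking java -cp ... QuorumPeerMain. The sketch below shows only the myid/template half of that flow; zkPeer, zkConfig, writeZookeeperFiles, and the cut-down template are simplified assumptions, not the agent's actual types or zkTemplate.

    package main

    import (
        "bytes"
        "fmt"
        "os"
        "path/filepath"
        "text/template"
    )

    // zkPeer and zkConfig are simplified stand-ins for the agent's ZookeeperPeer/ZookeeperConfig.
    type zkPeer struct {
        MyID int
        IP   string
    }

    type zkConfig struct {
        TickTime   int64
        DataDir    string
        ClientPort int64
        Peers      []zkPeer
    }

    // A cut-down zoo.cfg template in the spirit of the agent's zkTemplate.
    const zooCfgTemplate = `tickTime={{.TickTime}}
    dataDir={{.DataDir}}
    clientPort={{.ClientPort}}
    {{range .Peers}}server.{{.MyID}}={{.IP}}:2888:3888
    {{end}}`

    // writeZookeeperFiles writes the myid file and a rendered zoo.cfg, as startZookeeper does.
    func writeZookeeperFiles(dataDir, cfgPath string, myID int, cfg zkConfig) error {
        if err := os.WriteFile(filepath.Join(dataDir, "myid"), []byte(fmt.Sprintf("%d", myID)), 0644); err != nil {
            return err
        }
        tpl := template.Must(template.New("zoo.cfg").Parse(zooCfgTemplate))
        buf := new(bytes.Buffer)
        if err := tpl.Execute(buf, cfg); err != nil {
            return err
        }
        return os.WriteFile(cfgPath, buf.Bytes(), 0644)
    }

    func main() {
        dir, _ := os.MkdirTemp("", "zk")
        cfg := zkConfig{TickTime: 2000, DataDir: dir, ClientPort: 2181,
            Peers: []zkPeer{{MyID: 1, IP: "10.0.0.1"}, {MyID: 2, IP: "10.0.0.2"}}}
        if err := writeZookeeperFiles(dir, filepath.Join(dir, "zoo.cfg"), 1, cfg); err != nil {
            fmt.Println("error:", err)
        }
    }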
@@ -24,6 +24,7 @@ import (
 	"github.com/coreos/dbtester/dbtesterpb"
 	"github.com/coreos/dbtester/pkg/fileinspect"
 
 	"github.com/gyuho/psn"
 	"golang.org/x/net/context"
 )
@@ -78,7 +79,7 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 		plog.Infof("received gRPC request %q with database %q (clients: %d)", req.Operation, req.DatabaseID, req.CurrentClientNumber)
 	}
 
-	if req.Operation == dbtesterpb.Request_Start {
+	if req.Operation == dbtesterpb.Operation_Start {
 		f, err := openToAppend(globalFlags.databaseLog)
 		if err != nil {
 			return nil, err
@@ -87,7 +88,7 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 		plog.Infof("agent log path: %q", globalFlags.agentLog)
 		plog.Infof("database log path: %q", globalFlags.databaseLog)
-		if req.DatabaseID == dbtesterpb.Request_zetcd || req.DatabaseID == dbtesterpb.Request_cetcd {
+		if req.DatabaseID == dbtesterpb.DatabaseID_zetcd__beta || req.DatabaseID == dbtesterpb.DatabaseID_cetcd__beta {
 			proxyLog := globalFlags.databaseLog + "-" + t.req.DatabaseID.String()
 			pf, err := openToAppend(proxyLog)
 			if err != nil {
@@ -99,24 +100,24 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 		plog.Infof("system metrics CSV path: %q", globalFlags.systemMetricsCSV)
 
 		switch req.DatabaseID {
-		case dbtesterpb.Request_zookeeper:
+		case dbtesterpb.DatabaseID_zookeeper__r3_4_9:
 			plog.Infof("Zookeeper working directory: %q", globalFlags.zkWorkDir)
 			plog.Infof("Zookeeper data directory: %q", globalFlags.zkDataDir)
 			plog.Infof("Zookeeper configuration path: %q", globalFlags.zkConfig)
 
-		case dbtesterpb.Request_etcdv2, dbtesterpb.Request_etcdv3:
+		case dbtesterpb.DatabaseID_etcd__v2_3, dbtesterpb.DatabaseID_etcd__tip:
 			plog.Infof("etcd executable binary path: %q", globalFlags.etcdExec)
 			plog.Infof("etcd data directory: %q", globalFlags.etcdDataDir)
 
-		case dbtesterpb.Request_zetcd:
+		case dbtesterpb.DatabaseID_zetcd__beta:
 			plog.Infof("zetcd executable binary path: %q", globalFlags.zetcdExec)
 			plog.Infof("zetcd data directory: %q", globalFlags.etcdDataDir)
 
-		case dbtesterpb.Request_cetcd:
+		case dbtesterpb.DatabaseID_cetcd__beta:
 			plog.Infof("cetcd executable binary path: %q", globalFlags.cetcdExec)
 			plog.Infof("cetcd data directory: %q", globalFlags.etcdDataDir)
 
-		case dbtesterpb.Request_consul:
+		case dbtesterpb.DatabaseID_consul__v0_7_5:
 			plog.Infof("Consul executable binary path: %q", globalFlags.consulExec)
 			plog.Infof("Consul data directory: %q", globalFlags.consulDataDir)
 		}
@@ -124,21 +125,21 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 		// re-use configurations for next requests
 		t.req = *req
 	}
-	if req.Operation == dbtesterpb.Request_Heartbeat {
+	if req.Operation == dbtesterpb.Operation_Heartbeat {
 		t.req.CurrentClientNumber = req.CurrentClientNumber
 	}
 
 	var diskSpaceUsageBytes int64
 	switch req.Operation {
-	case dbtesterpb.Request_Start:
+	case dbtesterpb.Operation_Start:
 		switch t.req.DatabaseID {
-		case dbtesterpb.Request_etcdv2, dbtesterpb.Request_etcdv3, dbtesterpb.Request_zetcd, dbtesterpb.Request_cetcd:
+		case dbtesterpb.DatabaseID_etcd__v2_3, dbtesterpb.DatabaseID_etcd__tip, dbtesterpb.DatabaseID_zetcd__beta, dbtesterpb.DatabaseID_cetcd__beta:
 			if err := startEtcd(&globalFlags, t); err != nil {
 				plog.Errorf("startEtcd error %v", err)
 				return nil, err
 			}
 			switch t.req.DatabaseID {
-			case dbtesterpb.Request_zetcd:
+			case dbtesterpb.DatabaseID_zetcd__beta:
 				if err := startZetcd(&globalFlags, t); err != nil {
 					plog.Errorf("startZetcd error %v", err)
 					return nil, err
@@ -151,7 +152,7 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 					}
 					plog.Infof("exiting %q", t.proxyCmd.Path)
 				}()
-			case dbtesterpb.Request_cetcd:
+			case dbtesterpb.DatabaseID_cetcd__beta:
 				if err := startCetcd(&globalFlags, t); err != nil {
 					plog.Errorf("startCetcd error %v", err)
 					return nil, err
@@ -165,12 +166,12 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 					plog.Infof("exiting %q", t.proxyCmd.Path)
 				}()
 			}
-		case dbtesterpb.Request_zookeeper:
+		case dbtesterpb.DatabaseID_zookeeper__r3_4_9:
 			if err := startZookeeper(&globalFlags, t); err != nil {
 				plog.Errorf("startZookeeper error %v", err)
 				return nil, err
 			}
-		case dbtesterpb.Request_consul:
+		case dbtesterpb.DatabaseID_consul__v0_7_5:
 			if err := startConsul(&globalFlags, t); err != nil {
 				plog.Errorf("startConsul error %v", err)
 				return nil, err
@@ -193,7 +194,7 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 			return nil, err
 		}
 
-	case dbtesterpb.Request_Stop:
+	case dbtesterpb.Operation_Stop:
 		if t.cmd == nil {
 			return nil, fmt.Errorf("nil command")
 		}
@@ -253,7 +254,7 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 		}
 		diskSpaceUsageBytes = dbs
 
-	case dbtesterpb.Request_Heartbeat:
+	case dbtesterpb.Operation_Heartbeat:
 		plog.Infof("overwriting clients num %d to %q", t.req.CurrentClientNumber, t.clientNumPath)
 		if err := toFile(fmt.Sprintf("%d", t.req.CurrentClientNumber), t.clientNumPath); err != nil {
 			return nil, err
@@ -267,19 +268,19 @@ func (t *transporterServer) Transfer(ctx context.Context, req *dbtesterpb.Reques
 	return &dbtesterpb.Response{Success: true, DiskSpaceUsageBytes: diskSpaceUsageBytes}, nil
 }
 
-func measureDatabasSize(flg flags, rdb dbtesterpb.Request_Database) (int64, error) {
+func measureDatabasSize(flg flags, rdb dbtesterpb.DatabaseID) (int64, error) {
 	switch rdb {
-	case dbtesterpb.Request_etcdv2:
+	case dbtesterpb.DatabaseID_etcd__v2_3:
 		return fileinspect.Size(flg.etcdDataDir)
-	case dbtesterpb.Request_etcdv3:
+	case dbtesterpb.DatabaseID_etcd__tip:
 		return fileinspect.Size(flg.etcdDataDir)
-	case dbtesterpb.Request_zookeeper:
+	case dbtesterpb.DatabaseID_zookeeper__r3_4_9:
 		return fileinspect.Size(flg.zkDataDir)
-	case dbtesterpb.Request_consul:
+	case dbtesterpb.DatabaseID_consul__v0_7_5:
 		return fileinspect.Size(flg.consulDataDir)
-	case dbtesterpb.Request_cetcd:
+	case dbtesterpb.DatabaseID_cetcd__beta:
 		return fileinspect.Size(flg.etcdDataDir)
-	case dbtesterpb.Request_zetcd:
+	case dbtesterpb.DatabaseID_zetcd__beta:
 		return fileinspect.Size(flg.etcdDataDir)
 	default:
 		return 0, fmt.Errorf("uknown %q", rdb)

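The Transfer handler above now switches on the Operation_Start/Stop/Heartbeat enum and, for disk usage, maps each database ID to the data directory it measures. Below is a sketch in the spirit of measureDatabasSize; the databaseID constants and the dirSize stand-in for fileinspect.Size are illustrative assumptions, not the repository's API.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    type databaseID string

    const (
        databaseIDEtcdTip       databaseID = "etcd__tip"
        databaseIDZookeeperR349 databaseID = "zookeeper__r3_4_9"
        databaseIDConsulV075    databaseID = "consul__v0_7_5"
    )

    // dataDirFor mirrors the shape of measureDatabasSize: each supported database
    // resolves to the directory whose size the agent reports; unknown IDs are errors.
    func dataDirFor(id databaseID, etcdDir, zkDir, consulDir string) (string, error) {
        switch id {
        case databaseIDEtcdTip:
            return etcdDir, nil
        case databaseIDZookeeperR349:
            return zkDir, nil
        case databaseIDConsulV075:
            return consulDir, nil
        default:
            return "", fmt.Errorf("unknown %q", id)
        }
    }

    // dirSize is a small stand-in for fileinspect.Size: sum of regular file sizes.
    func dirSize(dir string) (int64, error) {
        var total int64
        err := filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            if !info.IsDir() {
                total += info.Size()
            }
            return nil
        })
        return total, err
    }

    func main() {
        dir, err := dataDirFor(databaseIDEtcdTip, "/tmp/etcd-data", "/tmp/zk-data", "/tmp/consul-data")
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        n, err := dirSize(dir)
        fmt.Println(n, err)
    }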
@@ -26,8 +26,8 @@ import (
 
 // uploadLog starts cetcd. This assumes that etcd is already started.
 func uploadLog(fs *flags, t *transporterServer) error {
-	plog.Infof("stopped collecting metrics; uploading logs to storage %q", t.req.Control.GoogleCloudProjectName)
-	u, err := remotestorage.NewGoogleCloudStorage([]byte(t.req.Control.GoogleCloudStorageKey), t.req.Control.GoogleCloudProjectName)
+	plog.Infof("stopped collecting metrics; uploading logs to storage %q", t.req.ConfigClientMachineInitial.GoogleCloudProjectName)
+	u, err := remotestorage.NewGoogleCloudStorage([]byte(t.req.ConfigClientMachineInitial.GoogleCloudStorageKey), t.req.ConfigClientMachineInitial.GoogleCloudProjectName)
 	if err != nil {
 		return err
 	}
@@ -38,12 +38,12 @@ func uploadLog(fs *flags, t *transporterServer) error {
 	srcDatabaseLogPath := fs.databaseLog
 	dstDatabaseLogPath := filepath.Base(fs.databaseLog)
 	if !strings.HasPrefix(filepath.Base(fs.databaseLog), t.req.DatabaseTag) {
-		dstDatabaseLogPath = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IpIndex+1, filepath.Base(fs.databaseLog))
+		dstDatabaseLogPath = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IPIndex+1, filepath.Base(fs.databaseLog))
 	}
-	dstDatabaseLogPath = filepath.Join(t.req.Control.GoogleCloudStorageSubDirectory, dstDatabaseLogPath)
+	dstDatabaseLogPath = filepath.Join(t.req.ConfigClientMachineInitial.GoogleCloudStorageSubDirectory, dstDatabaseLogPath)
 	plog.Infof("uploading database log [%q -> %q]", srcDatabaseLogPath, dstDatabaseLogPath)
 	for k := 0; k < 30; k++ {
-		if uerr = u.UploadFile(t.req.Control.GoogleCloudStorageBucketName, srcDatabaseLogPath, dstDatabaseLogPath); uerr != nil {
+		if uerr = u.UploadFile(t.req.ConfigClientMachineInitial.GoogleCloudStorageBucketName, srcDatabaseLogPath, dstDatabaseLogPath); uerr != nil {
 			plog.Warningf("UploadFile error... sleep and retry... (%v)", uerr)
 			time.Sleep(2 * time.Second)
 			continue
@@ -57,17 +57,17 @@ func uploadLog(fs *flags, t *transporterServer) error {
 	}
 
 	{
-		if t.req.DatabaseID == dbtesterpb.Request_zetcd || t.req.DatabaseID == dbtesterpb.Request_cetcd {
+		if t.req.DatabaseID == dbtesterpb.DatabaseID_zetcd__beta || t.req.DatabaseID == dbtesterpb.DatabaseID_cetcd__beta {
 			dpath := fs.databaseLog + "-" + t.req.DatabaseID.String()
 			srcDatabaseLogPath2 := dpath
 			dstDatabaseLogPath2 := filepath.Base(dpath)
 			if !strings.HasPrefix(filepath.Base(dpath), t.req.DatabaseTag) {
-				dstDatabaseLogPath2 = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IpIndex+1, filepath.Base(dpath))
+				dstDatabaseLogPath2 = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IPIndex+1, filepath.Base(dpath))
 			}
-			dstDatabaseLogPath2 = filepath.Join(t.req.Control.GoogleCloudStorageSubDirectory, dstDatabaseLogPath2)
+			dstDatabaseLogPath2 = filepath.Join(t.req.ConfigClientMachineInitial.GoogleCloudStorageSubDirectory, dstDatabaseLogPath2)
 			plog.Infof("uploading proxy-database log [%q -> %q]", srcDatabaseLogPath2, dstDatabaseLogPath2)
 			for k := 0; k < 30; k++ {
-				if uerr = u.UploadFile(t.req.Control.GoogleCloudStorageBucketName, srcDatabaseLogPath2, dstDatabaseLogPath2); uerr != nil {
+				if uerr = u.UploadFile(t.req.ConfigClientMachineInitial.GoogleCloudStorageBucketName, srcDatabaseLogPath2, dstDatabaseLogPath2); uerr != nil {
 					plog.Warningf("UploadFile error... sleep and retry... (%v)", uerr)
 					time.Sleep(2 * time.Second)
 					continue
@@ -85,12 +85,12 @@ func uploadLog(fs *flags, t *transporterServer) error {
 	srcSysMetricsDataPath := fs.systemMetricsCSV
 	dstSysMetricsDataPath := filepath.Base(fs.systemMetricsCSV)
 	if !strings.HasPrefix(filepath.Base(fs.systemMetricsCSV), t.req.DatabaseTag) {
-		dstSysMetricsDataPath = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IpIndex+1, filepath.Base(fs.systemMetricsCSV))
+		dstSysMetricsDataPath = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IPIndex+1, filepath.Base(fs.systemMetricsCSV))
 	}
-	dstSysMetricsDataPath = filepath.Join(t.req.Control.GoogleCloudStorageSubDirectory, dstSysMetricsDataPath)
+	dstSysMetricsDataPath = filepath.Join(t.req.ConfigClientMachineInitial.GoogleCloudStorageSubDirectory, dstSysMetricsDataPath)
 	plog.Infof("uploading system metrics data [%q -> %q]", srcSysMetricsDataPath, dstSysMetricsDataPath)
 	for k := 0; k < 30; k++ {
-		if uerr := u.UploadFile(t.req.Control.GoogleCloudStorageBucketName, srcSysMetricsDataPath, dstSysMetricsDataPath); uerr != nil {
+		if uerr := u.UploadFile(t.req.ConfigClientMachineInitial.GoogleCloudStorageBucketName, srcSysMetricsDataPath, dstSysMetricsDataPath); uerr != nil {
 			plog.Warningf("upload error... sleep and retry... (%v)", uerr)
 			time.Sleep(2 * time.Second)
 			continue
@@ -107,12 +107,12 @@ func uploadLog(fs *flags, t *transporterServer) error {
 	srcSysMetricsInterpolatedDataPath := fs.systemMetricsCSVInterpolated
 	dstSysMetricsInterpolatedDataPath := filepath.Base(fs.systemMetricsCSVInterpolated)
 	if !strings.HasPrefix(filepath.Base(fs.systemMetricsCSVInterpolated), t.req.DatabaseTag) {
-		dstSysMetricsInterpolatedDataPath = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IpIndex+1, filepath.Base(fs.systemMetricsCSVInterpolated))
+		dstSysMetricsInterpolatedDataPath = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IPIndex+1, filepath.Base(fs.systemMetricsCSVInterpolated))
 	}
-	dstSysMetricsInterpolatedDataPath = filepath.Join(t.req.Control.GoogleCloudStorageSubDirectory, dstSysMetricsInterpolatedDataPath)
+	dstSysMetricsInterpolatedDataPath = filepath.Join(t.req.ConfigClientMachineInitial.GoogleCloudStorageSubDirectory, dstSysMetricsInterpolatedDataPath)
 	plog.Infof("uploading system metrics interpolated data [%q -> %q]", srcSysMetricsInterpolatedDataPath, dstSysMetricsInterpolatedDataPath)
 	for k := 0; k < 30; k++ {
-		if uerr := u.UploadFile(t.req.Control.GoogleCloudStorageBucketName, srcSysMetricsInterpolatedDataPath, dstSysMetricsInterpolatedDataPath); uerr != nil {
+		if uerr := u.UploadFile(t.req.ConfigClientMachineInitial.GoogleCloudStorageBucketName, srcSysMetricsInterpolatedDataPath, dstSysMetricsInterpolatedDataPath); uerr != nil {
 			plog.Warningf("upload error... sleep and retry... (%v)", uerr)
 			time.Sleep(2 * time.Second)
 			continue
@@ -129,12 +129,12 @@ func uploadLog(fs *flags, t *transporterServer) error {
 	srcAgentLogPath := fs.agentLog
 	dstAgentLogPath := filepath.Base(fs.agentLog)
 	if !strings.HasPrefix(filepath.Base(fs.agentLog), t.req.DatabaseTag) {
-		dstAgentLogPath = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IpIndex+1, filepath.Base(fs.agentLog))
+		dstAgentLogPath = fmt.Sprintf("%s-%d-%s", t.req.DatabaseTag, t.req.IPIndex+1, filepath.Base(fs.agentLog))
 	}
-	dstAgentLogPath = filepath.Join(t.req.Control.GoogleCloudStorageSubDirectory, dstAgentLogPath)
+	dstAgentLogPath = filepath.Join(t.req.ConfigClientMachineInitial.GoogleCloudStorageSubDirectory, dstAgentLogPath)
 	plog.Infof("uploading agent logs [%q -> %q]", srcAgentLogPath, dstAgentLogPath)
 	for k := 0; k < 30; k++ {
-		if uerr := u.UploadFile(t.req.Control.GoogleCloudStorageBucketName, srcAgentLogPath, dstAgentLogPath); uerr != nil {
+		if uerr := u.UploadFile(t.req.ConfigClientMachineInitial.GoogleCloudStorageBucketName, srcAgentLogPath, dstAgentLogPath); uerr != nil {
 			plog.Warningf("UploadFile error... sleep and retry... (%v)", uerr)
 			time.Sleep(2 * time.Second)
 			continue

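Every upload in uploadLog retries up to 30 times with a two-second sleep between failures. A generic sketch of that bounded-retry loop; uploadFunc and uploadWithRetry are hypothetical names, not the remotestorage API:

    package main

    import (
        "fmt"
        "time"
    )

    // uploadFunc is a stand-in for a single UploadFile call against remote storage.
    type uploadFunc func(bucket, src, dst string) error

    // uploadWithRetry mirrors uploadLog's loop: try up to attempts times,
    // sleeping between failures, and return the last error if every attempt fails.
    func uploadWithRetry(up uploadFunc, bucket, src, dst string, attempts int, wait time.Duration) error {
        var uerr error
        for k := 0; k < attempts; k++ {
            if uerr = up(bucket, src, dst); uerr != nil {
                fmt.Printf("upload error... sleep and retry... (%v)\n", uerr)
                time.Sleep(wait)
                continue
            }
            return nil
        }
        return uerr
    }

    func main() {
        fail := func(bucket, src, dst string) error { return fmt.Errorf("transient error") }
        err := uploadWithRetry(fail, "bucket", "/tmp/agent.log", "logs/agent.log", 3, 10*time.Millisecond)
        fmt.Println("final:", err)
    }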
@@ -16,9 +16,9 @@ package analyze
 
 import (
 	"fmt"
-	"image/color"
 
-	"github.com/coreos/dbtester"
+	"github.com/coreos/dbtester/dbtesterpb"
 
 	"github.com/gonum/plot"
 	"github.com/gonum/plot/plotter"
 	"github.com/gonum/plot/plotutil"
@@ -49,7 +49,7 @@ type triplet struct {
 	maxCol dataframe.Column
 }
 
-func (all *allAggregatedData) draw(cfg dbtester.Plot, pairs ...pair) error {
+func (all *allAggregatedData) draw(cfg dbtesterpb.ConfigAnalyzeMachinePlot, pairs ...pair) error {
 	// frame now contains
 	// AVG-LATENCY-MS-etcd-v3.1-go1.7.4, AVG-LATENCY-MS-zookeeper-r3.4.9-java8, AVG-LATENCY-MS-consul-v0.7.2-go1.7.4
 	plt, err := plot.New()
@@ -72,7 +72,7 @@ func (all *allAggregatedData) draw(cfg dbtester.Plot, pairs ...pair) error {
 		if err != nil {
 			return err
 		}
-		l.Color = getRGB(all.headerToDatabaseID[p.y.Header()], i)
+		l.Color = dbtesterpb.GetRGBI(all.headerToDatabaseID[p.y.Header()], i)
 		l.Dashes = plotutil.Dashes(i)
 		ps = append(ps, l)
@@ -88,7 +88,7 @@ func (all *allAggregatedData) draw(cfg dbtester.Plot, pairs ...pair) error {
 	return nil
 }
 
-func (all *allAggregatedData) drawXY(cfg dbtester.Plot, pairs ...pair) error {
+func (all *allAggregatedData) drawXY(cfg dbtesterpb.ConfigAnalyzeMachinePlot, pairs ...pair) error {
 	// frame now contains
 	// KEYS-DB-TAG-X, AVG-LATENCY-MS-DB-TAG-Y, ...
 	plt, err := plot.New()
@@ -111,7 +111,7 @@ func (all *allAggregatedData) drawXY(cfg dbtester.Plot, pairs ...pair) error {
 		if err != nil {
 			return err
 		}
-		l.Color = getRGB(all.headerToDatabaseID[p.y.Header()], i)
+		l.Color = dbtesterpb.GetRGBI(all.headerToDatabaseID[p.y.Header()], i)
 		l.Dashes = plotutil.Dashes(i)
 		ps = append(ps, l)
@@ -127,7 +127,7 @@ func (all *allAggregatedData) drawXY(cfg dbtester.Plot, pairs ...pair) error {
 	return nil
 }
 
-func (all *allAggregatedData) drawXYWithErrorPoints(cfg dbtester.Plot, triplets ...triplet) error {
+func (all *allAggregatedData) drawXYWithErrorPoints(cfg dbtesterpb.ConfigAnalyzeMachinePlot, triplets ...triplet) error {
 	// frame now contains
 	// KEYS-DB-TAG-X, MIN-LATENCY-MS-DB-TAG-Y, AVG-LATENCY-MS-DB-TAG-Y, MAX-LATENCY-MS-DB-TAG-Y, ...
 	plt, err := plot.New()
@@ -150,7 +150,7 @@ func (all *allAggregatedData) drawXYWithErrorPoints(cfg dbtester.Plot, triplets
 		if err != nil {
 			return err
 		}
-		l.Color = getRGBII(all.headerToDatabaseID[triplet.avgCol.Header()], i)
+		l.Color = dbtesterpb.GetRGBII(all.headerToDatabaseID[triplet.avgCol.Header()], i)
 		l.Dashes = plotutil.Dashes(i)
 		ps = append(ps, l)
 		plt.Legend.Add(all.headerToDatabaseDescription[triplet.avgCol.Header()]+" MIN", l)
@@ -164,7 +164,7 @@ func (all *allAggregatedData) drawXYWithErrorPoints(cfg dbtester.Plot, triplets
 		if err != nil {
 			return err
 		}
-		l.Color = getRGB(all.headerToDatabaseID[triplet.avgCol.Header()], i)
+		l.Color = dbtesterpb.GetRGBI(all.headerToDatabaseID[triplet.avgCol.Header()], i)
 		l.Dashes = plotutil.Dashes(i)
 		ps = append(ps, l)
 		plt.Legend.Add(all.headerToDatabaseDescription[triplet.avgCol.Header()], l)
@@ -178,7 +178,7 @@ func (all *allAggregatedData) drawXYWithErrorPoints(cfg dbtester.Plot, triplets
 		if err != nil {
 			return err
 		}
-		l.Color = getRGBIII(all.headerToDatabaseID[triplet.avgCol.Header()], i)
+		l.Color = dbtesterpb.GetRGBIII(all.headerToDatabaseID[triplet.avgCol.Header()], i)
 		l.Dashes = plotutil.Dashes(i)
 		ps = append(ps, l)
 		plt.Legend.Add(all.headerToDatabaseDescription[triplet.avgCol.Header()]+" MAX", l)
@@ -244,63 +244,3 @@ func pointsXY(colX, colY dataframe.Column) (plotter.XYs, error) {
 	}
 	return pts, nil
 }
-
-func getRGB(databaseID string, i int) color.Color {
-	switch databaseID {
-	case "etcdv2":
-		return color.RGBA{218, 97, 229, 255} // purple
-	case "etcdv3":
-		return color.RGBA{24, 90, 169, 255} // blue
-	case "etcdtip":
-		return color.RGBA{0, 229, 255, 255} // cyan
-	case "zookeeper":
-		return color.RGBA{38, 169, 24, 255} // green
-	case "consul":
-		return color.RGBA{198, 53, 53, 255} // red
-	case "zetcd":
-		return color.RGBA{251, 206, 0, 255} // yellow
-	case "cetcd":
-		return color.RGBA{205, 220, 57, 255} // lime
-	}
-	return plotutil.Color(i)
-}
-
-func getRGBII(databaseID string, i int) color.Color {
-	switch databaseID {
-	case "etcdv2":
-		return color.RGBA{229, 212, 231, 255} // light-purple
-	case "etcdv3":
-		return color.RGBA{129, 212, 247, 255} // light-blue
-	case "etcdtip":
-		return color.RGBA{132, 255, 255, 255} // light-cyan
-	case "zookeeper":
-		return color.RGBA{129, 247, 152, 255} // light-green
-	case "consul":
-		return color.RGBA{247, 156, 156, 255} // light-red
-	case "zetcd":
-		return color.RGBA{245, 247, 166, 255} // light-yellow
-	case "cetcd":
-		return color.RGBA{238, 255, 65, 255} // light-lime
-	}
-	return plotutil.Color(i)
-}
-
-func getRGBIII(databaseID string, i int) color.Color {
-	switch databaseID {
-	case "etcdv2":
-		return color.RGBA{165, 8, 180, 255} // deep-purple
-	case "etcdv3":
-		return color.RGBA{37, 29, 191, 255} // deep-blue
-	case "etcdtip":
-		return color.RGBA{0, 96, 100, 255} // deep-cyan
-	case "zookeeper":
-		return color.RGBA{7, 64, 35, 255} // deep-green
-	case "consul":
-		return color.RGBA{212, 8, 46, 255} // deep-red
-	case "zetcd":
-		return color.RGBA{229, 255, 0, 255} // deep-yellow
-	case "cetcd":
-		return color.RGBA{205, 220, 57, 255} // deep-lime
-	}
-	return plotutil.Color(i)
-}

@ -25,6 +25,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/coreos/dbtester"
|
"github.com/coreos/dbtester"
|
||||||
|
"github.com/coreos/dbtester/dbtesterpb"
|
||||||
humanize "github.com/dustin/go-humanize"
|
humanize "github.com/dustin/go-humanize"
|
||||||
"github.com/gyuho/dataframe"
|
"github.com/gyuho/dataframe"
|
||||||
"github.com/olekukonko/tablewriter"
|
"github.com/olekukonko/tablewriter"
|
||||||
|
|
@ -64,14 +65,14 @@ func do(configPath string) error {
|
||||||
|
|
||||||
all := &allAggregatedData{
|
all := &allAggregatedData{
|
||||||
title: cfg.TestTitle,
|
title: cfg.TestTitle,
|
||||||
data: make([]*analyzeData, 0, len(cfg.DatabaseIDToTestData)),
|
data: make([]*analyzeData, 0, len(cfg.DatabaseIDToConfigAnalyzeMachineInitial)),
|
||||||
headerToDatabaseID: make(map[string]string),
|
headerToDatabaseID: make(map[string]string),
|
||||||
headerToDatabaseDescription: make(map[string]string),
|
headerToDatabaseDescription: make(map[string]string),
|
||||||
allDatabaseIDList: cfg.AllDatabaseIDList,
|
allDatabaseIDList: cfg.AllDatabaseIDList,
|
||||||
}
|
}
|
||||||
for _, databaseID := range cfg.AllDatabaseIDList {
|
for _, databaseID := range cfg.AllDatabaseIDList {
|
||||||
testgroup := cfg.DatabaseIDToTestGroup[databaseID]
|
testgroup := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
|
||||||
testdata := cfg.DatabaseIDToTestData[databaseID]
|
testdata := cfg.DatabaseIDToConfigAnalyzeMachineInitial[databaseID]
|
||||||
|
|
||||||
plog.Printf("reading system metrics data for %s", databaseID)
|
plog.Printf("reading system metrics data for %s", databaseID)
|
||||||
ad, err := readSystemMetricsAll(testdata.ServerSystemMetricsInterpolatedPathList...)
|
ad, err := readSystemMetricsAll(testdata.ServerSystemMetricsInterpolatedPathList...)
|
||||||
|
|
@ -88,7 +89,7 @@ func do(configPath string) error {
|
||||||
if err = ad.importBenchMetrics(testdata.ClientLatencyThroughputTimeseriesPath); err != nil {
|
if err = ad.importBenchMetrics(testdata.ClientLatencyThroughputTimeseriesPath); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err = ad.aggregateAll(testdata.ServerMemoryByKeyNumberPath, testdata.ServerReadBytesDeltaByKeyNumberPath, testdata.ServerWriteBytesDeltaByKeyNumberPath, testgroup.RequestNumber); err != nil {
|
if err = ad.aggregateAll(testdata.ServerMemoryByKeyNumberPath, testdata.ServerReadBytesDeltaByKeyNumberPath, testdata.ServerWriteBytesDeltaByKeyNumberPath, testgroup.ConfigClientMachineBenchmarkOptions.RequestNumber); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err = ad.save(); err != nil {
|
if err = ad.save(); err != nil {
|
||||||
|
|
@ -114,7 +115,7 @@ func do(configPath string) error {
|
||||||
// per database
|
// per database
|
||||||
for _, col := range ad.aggregated.Columns() {
|
for _, col := range ad.aggregated.Columns() {
|
||||||
databaseID := all.headerToDatabaseID[col.Header()]
|
databaseID := all.headerToDatabaseID[col.Header()]
|
||||||
row00Header = append(row00Header, cfg.DatabaseIDToTestGroup[databaseID].DatabaseTag)
|
row00Header = append(row00Header, cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID].DatabaseTag)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -272,14 +273,14 @@ func do(configPath string) error {
|
||||||
|
|
||||||
databaseIDToErrs := make(map[string][]string)
|
databaseIDToErrs := make(map[string][]string)
|
||||||
for i, databaseID := range cfg.AllDatabaseIDList {
|
for i, databaseID := range cfg.AllDatabaseIDList {
|
||||||
testgroup := cfg.DatabaseIDToTestGroup[databaseID]
|
testgroup := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
|
||||||
testdata := cfg.DatabaseIDToTestData[databaseID]
|
testdata := cfg.DatabaseIDToConfigAnalyzeMachineInitial[databaseID]
|
||||||
|
|
||||||
tag := testdata.DatabaseTag
|
tag := testdata.DatabaseTag
|
||||||
if tag != row00Header[i+1] {
|
if tag != row00Header[i+1] {
|
||||||
return fmt.Errorf("analyze config has different order; expected %q, got %q", row00Header[i+1], tag)
|
return fmt.Errorf("analyze config has different order; expected %q, got %q", row00Header[i+1], tag)
|
||||||
}
|
}
|
||||||
row02TotalRequestNumber = append(row02TotalRequestNumber, humanize.Comma(testgroup.RequestNumber))
|
row02TotalRequestNumber = append(row02TotalRequestNumber, humanize.Comma(testgroup.ConfigClientMachineBenchmarkOptions.RequestNumber))
|
||||||
|
|
||||||
{
|
{
|
||||||
fr, err := dataframe.NewFromCSV(nil, testdata.ClientSystemMetricsInterpolatedPath)
|
fr, err := dataframe.NewFromCSV(nil, testdata.ClientSystemMetricsInterpolatedPath)
|
||||||
|
|
@ -515,7 +516,7 @@ func do(configPath string) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
plog.Printf("saving summary data to %q", cfg.Analyze.AllAggregatedOutputPathCSV)
|
plog.Printf("saving summary data to %q", cfg.ConfigAnalyzeMachineAllAggregatedOutput.AllAggregatedOutputPathCSV)
|
||||||
aggRowsForSummaryCSV := [][]string{
|
aggRowsForSummaryCSV := [][]string{
|
||||||
row00Header,
|
row00Header,
|
||||||
row01TotalSeconds,
|
row01TotalSeconds,
|
||||||
|
|
@ -559,7 +560,7 @@ func do(configPath string) error {
|
||||||
row29SectorsWrittenDeltaSum,
|
row29SectorsWrittenDeltaSum,
|
||||||
row30AvgDiskSpaceUsage,
|
row30AvgDiskSpaceUsage,
|
||||||
}
|
}
|
||||||
file, err := openToOverwrite(cfg.Analyze.AllAggregatedOutputPathCSV)
|
file, err := openToOverwrite(cfg.ConfigAnalyzeMachineAllAggregatedOutput.AllAggregatedOutputPathCSV)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
@ -573,7 +574,7 @@ func do(configPath string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
plog.Printf("saving summary data to %q", cfg.Analyze.AllAggregatedOutputPathTXT)
|
plog.Printf("saving summary data to %q", cfg.ConfigAnalyzeMachineAllAggregatedOutput.AllAggregatedOutputPathTXT)
|
||||||
aggRowsForSummaryTXT := [][]string{
|
aggRowsForSummaryTXT := [][]string{
|
||||||
row00Header,
|
row00Header,
|
||||||
row01TotalSeconds,
|
row01TotalSeconds,
|
||||||
|
|
@ -634,7 +635,7 @@ func do(configPath string) error {
|
||||||
if errs != "" {
|
if errs != "" {
|
||||||
stxt += "\n" + "\n" + errs
|
stxt += "\n" + "\n" + errs
|
||||||
}
|
}
|
||||||
if err := toFile(stxt, changeExtToTxt(cfg.Analyze.AllAggregatedOutputPathTXT)); err != nil {
|
if err := toFile(stxt, changeExtToTxt(cfg.ConfigAnalyzeMachineAllAggregatedOutput.AllAggregatedOutputPathTXT)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@@ -642,7 +643,7 @@ func do(configPath string) error {
 plog.Info("combining all latency data by keys")
 allLatencyFrame := dataframe.New()
 for _, databaseID := range cfg.AllDatabaseIDList {
-testdata := cfg.DatabaseIDToTestData[databaseID]
+testdata := cfg.DatabaseIDToConfigAnalyzeMachineInitial[databaseID]

 fr, err := dataframe.NewFromCSV(nil, testdata.ClientLatencyByKeyNumberPath)
 if err != nil {

@@ -688,7 +689,7 @@ func do(configPath string) error {
 plog.Info("combining all server memory usage by keys")
 allMemoryFrame := dataframe.New()
 for _, databaseID := range cfg.AllDatabaseIDList {
-testdata := cfg.DatabaseIDToTestData[databaseID]
+testdata := cfg.DatabaseIDToConfigAnalyzeMachineInitial[databaseID]

 fr, err := dataframe.NewFromCSV(nil, testdata.ServerMemoryByKeyNumberPath)
 if err != nil {

@@ -734,7 +735,7 @@ func do(configPath string) error {
 plog.Info("combining all server read bytes delta by keys")
 allReadBytesDeltaFrame := dataframe.New()
 for _, databaseID := range cfg.AllDatabaseIDList {
-testdata := cfg.DatabaseIDToTestData[databaseID]
+testdata := cfg.DatabaseIDToConfigAnalyzeMachineInitial[databaseID]

 fr, err := dataframe.NewFromCSV(nil, testdata.ServerReadBytesDeltaByKeyNumberPath)
 if err != nil {

@@ -771,7 +772,7 @@ func do(configPath string) error {
 plog.Info("combining all server write bytes delta by keys")
 allWriteBytesDeltaFrame := dataframe.New()
 for _, databaseID := range cfg.AllDatabaseIDList {
-testdata := cfg.DatabaseIDToTestData[databaseID]
+testdata := cfg.DatabaseIDToConfigAnalyzeMachineInitial[databaseID]

 fr, err := dataframe.NewFromCSV(nil, testdata.ServerWriteBytesDeltaByKeyNumberPath)
 if err != nil {

@@ -806,14 +807,14 @@ func do(configPath string) error {
 }

 {
-allLatencyFrameCfg := dbtester.Plot{
+allLatencyFrameCfg := dbtesterpb.ConfigAnalyzeMachinePlot{
 Column: "AVG-LATENCY-MS",
 XAxis: "Cumulative Number of Keys",
 YAxis: "Latency(millisecond) by Keys",
-OutputPathList: make([]string, len(cfg.PlotList[0].OutputPathList)),
+OutputPathList: make([]string, len(cfg.AnalyzePlotList[0].OutputPathList)),
 }
-allLatencyFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY.svg")
+allLatencyFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY.svg")
-allLatencyFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY.png")
+allLatencyFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY.png")
 plog.Printf("plotting %v", allLatencyFrameCfg.OutputPathList)
 var pairs []pair
 allCols := allLatencyFrame.Columns()

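For illustration only, not part of this commit: a small sketch of the path pattern used by the plot hunks above and below. The extra by-key plots are written next to the first configured plot output, changing only the file name and extension; the basePlot value here is hypothetical.

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Hypothetical stand-in for cfg.AnalyzePlotList[0].OutputPathList[0].
        basePlot := "/home/gyuho/plots/AVG-LATENCY-MS.svg"

        dir := filepath.Dir(basePlot)
        fmt.Println(filepath.Join(dir, "AVG-LATENCY-MS-BY-KEY.svg")) // /home/gyuho/plots/AVG-LATENCY-MS-BY-KEY.svg
        fmt.Println(filepath.Join(dir, "AVG-LATENCY-MS-BY-KEY.png")) // /home/gyuho/plots/AVG-LATENCY-MS-BY-KEY.png
        fmt.Println(filepath.Join(dir, "AVG-LATENCY-MS-BY-KEY.csv")) // /home/gyuho/plots/AVG-LATENCY-MS-BY-KEY.csv
    }
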
@@ -835,21 +836,21 @@ func do(configPath string) error {
 return err
 }
 }
-csvPath := filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY.csv")
+csvPath := filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY.csv")
 if err := newCSV.CSV(csvPath); err != nil {
 return err
 }
 }
 {
 // with error points
-allLatencyFrameCfg := dbtester.Plot{
+allLatencyFrameCfg := dbtesterpb.ConfigAnalyzeMachinePlot{
 Column: "AVG-LATENCY-MS",
 XAxis: "Cumulative Number of Keys",
 YAxis: "Latency(millisecond) by Keys",
-OutputPathList: make([]string, len(cfg.PlotList[0].OutputPathList)),
+OutputPathList: make([]string, len(cfg.AnalyzePlotList[0].OutputPathList)),
 }
-allLatencyFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg")
+allLatencyFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg")
-allLatencyFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.png")
+allLatencyFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.png")
 plog.Printf("plotting %v", allLatencyFrameCfg.OutputPathList)
 var triplets []triplet
 allCols := allLatencyFrame.Columns()

@@ -879,20 +880,20 @@ func do(configPath string) error {
 return err
 }
 }
-csvPath := filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.csv")
+csvPath := filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.csv")
 if err := newCSV.CSV(csvPath); err != nil {
 return err
 }
 }
 {
-allMemoryFrameCfg := dbtester.Plot{
+allMemoryFrameCfg := dbtesterpb.ConfigAnalyzeMachinePlot{
 Column: "AVG-VMRSS-MB",
 XAxis: "Cumulative Number of Keys",
 YAxis: "Memory(MB) by Keys",
-OutputPathList: make([]string, len(cfg.PlotList[0].OutputPathList)),
+OutputPathList: make([]string, len(cfg.AnalyzePlotList[0].OutputPathList)),
 }
-allMemoryFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY.svg")
+allMemoryFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY.svg")
-allMemoryFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY.png")
+allMemoryFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY.png")
 plog.Printf("plotting %v", allMemoryFrameCfg.OutputPathList)
 var pairs []pair
 allCols := allMemoryFrame.Columns()

@@ -914,21 +915,21 @@ func do(configPath string) error {
 return err
 }
 }
-csvPath := filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY.csv")
+csvPath := filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY.csv")
 if err := newCSV.CSV(csvPath); err != nil {
 return err
 }
 }
 {
 // with error points
-allMemoryFrameCfg := dbtester.Plot{
+allMemoryFrameCfg := dbtesterpb.ConfigAnalyzeMachinePlot{
 Column: "AVG-VMRSS-MB",
 XAxis: "Cumulative Number of Keys",
 YAxis: "Memory(MB) by Keys",
-OutputPathList: make([]string, len(cfg.PlotList[0].OutputPathList)),
+OutputPathList: make([]string, len(cfg.AnalyzePlotList[0].OutputPathList)),
 }
-allMemoryFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg")
+allMemoryFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg")
-allMemoryFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.png")
+allMemoryFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.png")
 plog.Printf("plotting %v", allMemoryFrameCfg.OutputPathList)
 var triplets []triplet
 allCols := allMemoryFrame.Columns()

@@ -958,20 +959,20 @@ func do(configPath string) error {
 return err
 }
 }
-csvPath := filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.csv")
+csvPath := filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.csv")
 if err := newCSV.CSV(csvPath); err != nil {
 return err
 }
 }
 {
-allReadBytesDeltaFrameCfg := dbtester.Plot{
+allReadBytesDeltaFrameCfg := dbtesterpb.ConfigAnalyzeMachinePlot{
 Column: "AVG-READ-BYTES-NUM-DELTA",
 XAxis: "Cumulative Number of Keys",
 YAxis: "Average Read Bytes Delta by Keys",
-OutputPathList: make([]string, len(cfg.PlotList[0].OutputPathList)),
+OutputPathList: make([]string, len(cfg.AnalyzePlotList[0].OutputPathList)),
 }
-allReadBytesDeltaFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-READ-BYTES-NUM-DELTA-BY-KEY.svg")
+allReadBytesDeltaFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-READ-BYTES-NUM-DELTA-BY-KEY.svg")
-allReadBytesDeltaFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-READ-BYTES-NUM-DELTA-BY-KEY.png")
+allReadBytesDeltaFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-READ-BYTES-NUM-DELTA-BY-KEY.png")
 plog.Printf("plotting %v", allReadBytesDeltaFrameCfg.OutputPathList)
 var pairs []pair
 allCols := allReadBytesDeltaFrame.Columns()

@@ -984,20 +985,20 @@ func do(configPath string) error {
 if err = all.drawXY(allReadBytesDeltaFrameCfg, pairs...); err != nil {
 return err
 }
-csvPath := filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-READ-BYTES-NUM-DELTA-BY-KEY.csv")
+csvPath := filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-READ-BYTES-NUM-DELTA-BY-KEY.csv")
 if err := allReadBytesDeltaFrame.CSV(csvPath); err != nil {
 return err
 }
 }
 {
-allWriteBytesDeltaFrameCfg := dbtester.Plot{
+allWriteBytesDeltaFrameCfg := dbtesterpb.ConfigAnalyzeMachinePlot{
 Column: "AVG-WRITE-BYTES-NUM-DELTA",
 XAxis: "Cumulative Number of Keys",
 YAxis: "Average Write Bytes Delta by Keys",
-OutputPathList: make([]string, len(cfg.PlotList[0].OutputPathList)),
+OutputPathList: make([]string, len(cfg.AnalyzePlotList[0].OutputPathList)),
 }
-allWriteBytesDeltaFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-WRITE-BYTES-NUM-DELTA-BY-KEY.svg")
+allWriteBytesDeltaFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-WRITE-BYTES-NUM-DELTA-BY-KEY.svg")
-allWriteBytesDeltaFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-WRITE-BYTES-NUM-DELTA-BY-KEY.png")
+allWriteBytesDeltaFrameCfg.OutputPathList[1] = filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-WRITE-BYTES-NUM-DELTA-BY-KEY.png")
 plog.Printf("plotting %v", allWriteBytesDeltaFrameCfg.OutputPathList)
 var pairs []pair
 allCols := allWriteBytesDeltaFrame.Columns()

@@ -1010,21 +1011,21 @@ func do(configPath string) error {
 if err = all.drawXY(allWriteBytesDeltaFrameCfg, pairs...); err != nil {
 return err
 }
-csvPath := filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-WRITE-BYTES-NUM-DELTA-BY-KEY.csv")
+csvPath := filepath.Join(filepath.Dir(cfg.AnalyzePlotList[0].OutputPathList[0]), "AVG-WRITE-BYTES-NUM-DELTA-BY-KEY.csv")
 if err := allWriteBytesDeltaFrame.CSV(csvPath); err != nil {
 return err
 }
 }

 plog.Println("combining data for plotting")
-for _, plotConfig := range cfg.PlotList {
+for _, plotConfig := range cfg.AnalyzePlotList {
 plog.Printf("plotting %q", plotConfig.Column)
 var clientNumColumns []dataframe.Column
 var pairs []pair
 var dataColumns []dataframe.Column
 for i, ad := range all.data {
 databaseID := all.allDatabaseIDList[i]
-tag := cfg.DatabaseIDToTestGroup[databaseID].DatabaseTag
+tag := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID].DatabaseTag

 avgCol, err := ad.aggregated.Column("CONTROL-CLIENT-NUM")
 if err != nil {

@@ -1076,7 +1077,7 @@ func do(configPath string) error {
 return err
 }

-if len(cfg.DatabaseIDToTestGroup[cfg.AllDatabaseIDList[0]].BenchmarkOptions.ConnectionClientNumbers) > 0 {
+if len(cfg.DatabaseIDToConfigClientMachineAgentControl[cfg.AllDatabaseIDList[0]].ConfigClientMachineBenchmarkOptions.ConnectionClientNumbers) > 0 {
 plog.Printf("aggregating data for %q of all database (by client number)", plotConfig.Column)
 nf3 := dataframe.New()
 var firstKeys []int
@@ -24,8 +24,8 @@ import (
 )

 // BroadcaseRequest sends request to all endpoints.
-func (cfg *Config) BroadcaseRequest(databaseID string, op dbtesterpb.Request_Operation) (map[int]dbtesterpb.Response, error) {
+func (cfg *Config) BroadcaseRequest(databaseID string, op dbtesterpb.Operation) (map[int]dbtesterpb.Response, error) {
-gcfg, ok := cfg.DatabaseIDToTestGroup[databaseID]
+gcfg, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
 if !ok {
 return nil, fmt.Errorf("database id %q does not exist", databaseID)
 }

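For illustration only, not part of this commit: a hedged sketch of how a caller could drive the updated BroadcaseRequest signature. The wrapper name is hypothetical; only identifiers visible in the diff (Config, BroadcaseRequest, dbtesterpb.Operation, dbtesterpb.Response) are assumed, and the operation value is passed in by the caller.

    package example

    import (
        "fmt"

        "github.com/coreos/dbtester"
        "github.com/coreos/dbtester/dbtesterpb"
    )

    // broadcast is a hypothetical helper: it fans an operation out to every
    // agent endpoint of one database and reports how many endpoints answered.
    func broadcast(cfg *dbtester.Config, databaseID string, op dbtesterpb.Operation) error {
        // Note: the method name in the source really is "BroadcaseRequest".
        resps, err := cfg.BroadcaseRequest(databaseID, op)
        if err != nil {
            return err
        }
        fmt.Printf("%q: received %d responses for operation %v\n", databaseID, len(resps), op)
        return nil
    }
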
@@ -1,21 +0,0 @@
-// Copyright 2017 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dbtester
-
-// Cetcd is cetcd-specific flags
-// (https://github.com/coreos/cetcd).
-type Cetcd struct {
-// no options needed yet
-}

@@ -1,21 +0,0 @@
-// Copyright 2017 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package dbtester
-
-// Consul is Consul-specific flags
-// (https://github.com/hashicorp/consul).
-type Consul struct {
-// no options needed yet
-}

@@ -21,148 +21,35 @@ import (
 "strings"

 "github.com/coreos/dbtester/dbtesterpb"

 "gopkg.in/yaml.v2"
 )

+// MakeTag converts database description to database tag.
+func MakeTag(desc string) string {
+s := strings.ToLower(desc)
+s = strings.Replace(s, "go ", "go", -1)
+s = strings.Replace(s, "java ", "java", -1)
+s = strings.Replace(s, "(", "", -1)
+s = strings.Replace(s, ")", "", -1)
+return strings.Replace(s, " ", "-", -1)
+}
+
 // Config configures dbtester control clients.
 type Config struct {
 TestTitle string `yaml:"test_title"`
 TestDescription string `yaml:"test_description"`

-Control `yaml:"control"`
+dbtesterpb.ConfigClientMachineInitial `yaml:"config_client_machine_initial"`

 AllDatabaseIDList []string `yaml:"all_database_id_list"`
-DatabaseIDToTestGroup map[string]TestGroup `yaml:"datatbase_id_to_test_group"`
+DatabaseIDToConfigClientMachineAgentControl map[string]dbtesterpb.ConfigClientMachineAgentControl `yaml:"datatbase_id_to_config_client_machine_agent_control"`
-DatabaseIDToTestData map[string]TestData `yaml:"datatbase_id_to_test_data"`
+DatabaseIDToConfigAnalyzeMachineInitial map[string]dbtesterpb.ConfigAnalyzeMachineInitial `yaml:"datatbase_id_to_config_analyze_machine_initial"`

-Analyze `yaml:"analyze"`
+dbtesterpb.ConfigAnalyzeMachineAllAggregatedOutput `yaml:"analyze_all_aggregated_output"`
+AnalyzePlotPathPrefix string `yaml:"analyze_plot_path_prefix"`
-PlotPathPrefix string `yaml:"plot_path_prefix"`
+AnalyzePlotList []dbtesterpb.ConfigAnalyzeMachinePlot `yaml:"analyze_plot_list"`
-PlotList []Plot `yaml:"plot_list"`
+dbtesterpb.ConfigAnalyzeMachineREADME `yaml:"analyze_readme"`
-README `yaml:"readme"`
-}
-
-// Control represents common control options on client machine.
-type Control struct {
-PathPrefix string `yaml:"path_prefix"`
-LogPath string `yaml:"log_path"`
-ClientSystemMetricsPath string `yaml:"client_system_metrics_path"`
-ClientSystemMetricsInterpolatedPath string `yaml:"client_system_metrics_interpolated_path"`
-ClientLatencyThroughputTimeseriesPath string `yaml:"client_latency_throughput_timeseries_path"`
-ClientLatencyDistributionAllPath string `yaml:"client_latency_distribution_all_path"`
-ClientLatencyDistributionPercentilePath string `yaml:"client_latency_distribution_percentile_path"`
-ClientLatencyDistributionSummaryPath string `yaml:"client_latency_distribution_summary_path"`
-ClientLatencyByKeyNumberPath string `yaml:"client_latency_by_key_number_path"`
-ServerDiskSpaceUsageSummaryPath string `yaml:"server_disk_space_usage_summary_path"`
-
-GoogleCloudProjectName string `yaml:"google_cloud_project_name"`
-GoogleCloudStorageKeyPath string `yaml:"google_cloud_storage_key_path"`
-GoogleCloudStorageKey string
-GoogleCloudStorageBucketName string `yaml:"google_cloud_storage_bucket_name"`
-GoogleCloudStorageSubDirectory string `yaml:"google_cloud_storage_sub_directory"`
-}
-
-// TestGroup specifies database test group.
-type TestGroup struct {
-DatabaseID string
-DatabaseDescription string `yaml:"database_description"`
-DatabaseTag string
-
-PeerIPs []string `yaml:"peer_ips"`
-PeerIPsString string
-
-DatabasePortToConnect int `yaml:"database_port_to_connect"`
-DatabaseEndpoints []string
-
-AgentPortToConnect int `yaml:"agent_port_to_connect"`
-AgentEndpoints []string
-
-// database-specific flags to start
-Etcdv2 `yaml:"etcdv2"`
-Etcdv3 `yaml:"etcdv3"`
-Zookeeper `yaml:"zookeeper"`
-Consul `yaml:"consul"`
-Zetcd `yaml:"zetcd"`
-Cetcd `yaml:"cetcd"`
-
-// benchmark options
-BenchmarkOptions `yaml:"benchmark_options"`
-BenchmarkSteps `yaml:"benchmark_steps"`
-}
-
-// BenchmarkOptions specifies the benchmark options.
-type BenchmarkOptions struct {
-Type string `yaml:"type"`
-
-RequestNumber int64 `yaml:"request_number"`
-ConnectionNumber int64 `yaml:"connection_number"`
-ClientNumber int64 `yaml:"client_number"`
-ConnectionClientNumbers []int64 `yaml:"connection_client_numbers"`
-RateLimitRequestsPerSecond int64 `yaml:"rate_limit_requests_per_second"`
-
-// for writes, reads
-SameKey bool `yaml:"same_key"`
-KeySizeBytes int64 `yaml:"key_size_bytes"`
-ValueSizeBytes int64 `yaml:"value_size_bytes"`
-
-// for reads
-StaleRead bool `yaml:"stale_read"`
-}
-
-// BenchmarkSteps specifies the benchmark workflow.
-type BenchmarkSteps struct {
-Step1StartDatabase bool `yaml:"step1_start_database"`
-Step2StressDatabase bool `yaml:"step2_stress_database"`
-Step3StopDatabase bool `yaml:"step3_stop_database"`
-Step4UploadLogs bool `yaml:"step4_upload_logs"`
-}
-
-// TestData defines raw data to import.
-type TestData struct {
-DatabaseID string
-DatabaseTag string
-DatabaseDescription string
-
-PathPrefix string `yaml:"path_prefix"`
-ClientSystemMetricsInterpolatedPath string `yaml:"client_system_metrics_interpolated_path"`
-ClientLatencyThroughputTimeseriesPath string `yaml:"client_latency_throughput_timeseries_path"`
-ClientLatencyDistributionAllPath string `yaml:"client_latency_distribution_all_path"`
-ClientLatencyDistributionPercentilePath string `yaml:"client_latency_distribution_percentile_path"`
-ClientLatencyDistributionSummaryPath string `yaml:"client_latency_distribution_summary_path"`
-ClientLatencyByKeyNumberPath string `yaml:"client_latency_by_key_number_path"`
-ServerDiskSpaceUsageSummaryPath string `yaml:"server_disk_space_usage_summary_path"`
-ServerMemoryByKeyNumberPath string `yaml:"server_memory_by_key_number_path"`
-ServerReadBytesDeltaByKeyNumberPath string `yaml:"server_read_bytes_delta_by_key_number_path"`
-ServerWriteBytesDeltaByKeyNumberPath string `yaml:"server_write_bytes_delta_by_key_number_path"`
-ServerSystemMetricsInterpolatedPathList []string `yaml:"server_system_metrics_interpolated_path_list"`
-AllAggregatedOutputPath string `yaml:"all_aggregated_output_path"`
-}
-
-// Analyze defines analyze config.
-type Analyze struct {
-AllAggregatedOutputPathCSV string `yaml:"all_aggregated_output_path_csv"`
-AllAggregatedOutputPathTXT string `yaml:"all_aggregated_output_path_txt"`
-}
-
-// Plot defines plot configuration.
-type Plot struct {
-Column string `yaml:"column"`
-XAxis string `yaml:"x_axis"`
-YAxis string `yaml:"y_axis"`
-OutputPathCSV string `yaml:"output_path_csv"`
-OutputPathList []string `yaml:"output_path_list"`
-}
-
-// README defines how to write README.
-type README struct {
-OutputPath string `yaml:"output_path"`
-Images []Image `yaml:"images"`
-}
-
-// Image defines image data.
-type Image struct {
-Title string `yaml:"title"`
-Path string `yaml:"path"`
-Type string `yaml:"type"`
 }

 // ReadConfig reads control configuration file.

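For illustration only, not part of this commit: a small, self-contained demonstration of the MakeTag helper introduced above. The inputs are database descriptions that appear later in this diff, and the outputs follow directly from the chain of string replacements.

    package main

    import (
        "fmt"
        "strings"
    )

    // makeTag reproduces MakeTag from the hunk above: lower-case the description,
    // glue "go "/"java " to their version numbers, drop parentheses, then
    // hyphenate the remaining spaces.
    func makeTag(desc string) string {
        s := strings.ToLower(desc)
        s = strings.Replace(s, "go ", "go", -1)
        s = strings.Replace(s, "java ", "java", -1)
        s = strings.Replace(s, "(", "", -1)
        s = strings.Replace(s, ")", "", -1)
        return strings.Replace(s, " ", "-", -1)
    }

    func main() {
        fmt.Println(makeTag("etcd tip (Go 1.8.0)"))      // etcd-tip-go1.8.0
        fmt.Println(makeTag("Zookeeper r3.5.2-alpha"))   // zookeeper-r3.5.2-alpha
        fmt.Println(makeTag("Consul v0.7.5 (Go 1.8.0)")) // consul-v0.7.5-go1.8.0
    }
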
@@ -176,19 +63,29 @@ func ReadConfig(fpath string, analyze bool) (*Config, error) {
 return nil, err
 }

-if cfg.Control.PathPrefix != "" {
+for _, id := range cfg.AllDatabaseIDList {
-cfg.Control.LogPath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.LogPath)
+if !dbtesterpb.IsValidDatabaseID(id) {
-cfg.Control.ClientSystemMetricsPath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.ClientSystemMetricsPath)
+return nil, fmt.Errorf("databaseID %q is unknown", id)
-cfg.Control.ClientSystemMetricsInterpolatedPath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.ClientSystemMetricsInterpolatedPath)
+}
-cfg.Control.ClientLatencyThroughputTimeseriesPath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.ClientLatencyThroughputTimeseriesPath)
+}
-cfg.Control.ClientLatencyDistributionAllPath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.ClientLatencyDistributionAllPath)
-cfg.Control.ClientLatencyDistributionPercentilePath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.ClientLatencyDistributionPercentilePath)
+if cfg.ConfigClientMachineInitial.PathPrefix != "" {
-cfg.Control.ClientLatencyDistributionSummaryPath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.ClientLatencyDistributionSummaryPath)
+cfg.ConfigClientMachineInitial.LogPath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.LogPath)
-cfg.Control.ClientLatencyByKeyNumberPath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.ClientLatencyByKeyNumberPath)
+cfg.ConfigClientMachineInitial.ClientSystemMetricsPath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.ClientSystemMetricsPath)
-cfg.Control.ServerDiskSpaceUsageSummaryPath = filepath.Join(cfg.Control.PathPrefix, cfg.Control.ServerDiskSpaceUsageSummaryPath)
+cfg.ConfigClientMachineInitial.ClientSystemMetricsInterpolatedPath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.ClientSystemMetricsInterpolatedPath)
+cfg.ConfigClientMachineInitial.ClientLatencyThroughputTimeseriesPath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.ClientLatencyThroughputTimeseriesPath)
+cfg.ConfigClientMachineInitial.ClientLatencyDistributionAllPath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.ClientLatencyDistributionAllPath)
+cfg.ConfigClientMachineInitial.ClientLatencyDistributionPercentilePath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.ClientLatencyDistributionPercentilePath)
+cfg.ConfigClientMachineInitial.ClientLatencyDistributionSummaryPath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.ClientLatencyDistributionSummaryPath)
+cfg.ConfigClientMachineInitial.ClientLatencyByKeyNumberPath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.ClientLatencyByKeyNumberPath)
+cfg.ConfigClientMachineInitial.ServerDiskSpaceUsageSummaryPath = filepath.Join(cfg.ConfigClientMachineInitial.PathPrefix, cfg.ConfigClientMachineInitial.ServerDiskSpaceUsageSummaryPath)
+}

+for databaseID, group := range cfg.DatabaseIDToConfigClientMachineAgentControl {
+if !dbtesterpb.IsValidDatabaseID(databaseID) {
+return nil, fmt.Errorf("databaseID %q is unknown", databaseID)
 }

-for databaseID, group := range cfg.DatabaseIDToTestGroup {
 group.DatabaseID = databaseID
 group.DatabaseTag = MakeTag(group.DatabaseDescription)
 group.PeerIPsString = strings.Join(group.PeerIPs, "___")

@@ -198,159 +95,322 @@ func ReadConfig(fpath string, analyze bool) (*Config, error) {
 group.DatabaseEndpoints[j] = fmt.Sprintf("%s:%d", group.PeerIPs[j], group.DatabasePortToConnect)
 group.AgentEndpoints[j] = fmt.Sprintf("%s:%d", group.PeerIPs[j], group.AgentPortToConnect)
 }
-cfg.DatabaseIDToTestGroup[databaseID] = group
+cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID] = group
 }

-for databaseID, testdata := range cfg.DatabaseIDToTestData {
+for databaseID, amc := range cfg.DatabaseIDToConfigAnalyzeMachineInitial {
-testdata.PathPrefix = strings.TrimSpace(testdata.PathPrefix)
+amc.PathPrefix = strings.TrimSpace(amc.PathPrefix)
-testdata.DatabaseID = databaseID
+amc.DatabaseID = databaseID
-testdata.DatabaseTag = cfg.DatabaseIDToTestGroup[databaseID].DatabaseTag
+amc.DatabaseTag = cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID].DatabaseTag
-testdata.DatabaseDescription = cfg.DatabaseIDToTestGroup[databaseID].DatabaseDescription
+amc.DatabaseDescription = cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID].DatabaseDescription

-if testdata.PathPrefix != "" {
+if amc.PathPrefix != "" {
-testdata.ClientSystemMetricsInterpolatedPath = testdata.PathPrefix + "-" + testdata.ClientSystemMetricsInterpolatedPath
+amc.ClientSystemMetricsInterpolatedPath = amc.PathPrefix + "-" + amc.ClientSystemMetricsInterpolatedPath
-testdata.ClientLatencyThroughputTimeseriesPath = testdata.PathPrefix + "-" + testdata.ClientLatencyThroughputTimeseriesPath
+amc.ClientLatencyThroughputTimeseriesPath = amc.PathPrefix + "-" + amc.ClientLatencyThroughputTimeseriesPath
-testdata.ClientLatencyDistributionAllPath = testdata.PathPrefix + "-" + testdata.ClientLatencyDistributionAllPath
+amc.ClientLatencyDistributionAllPath = amc.PathPrefix + "-" + amc.ClientLatencyDistributionAllPath
-testdata.ClientLatencyDistributionPercentilePath = testdata.PathPrefix + "-" + testdata.ClientLatencyDistributionPercentilePath
+amc.ClientLatencyDistributionPercentilePath = amc.PathPrefix + "-" + amc.ClientLatencyDistributionPercentilePath
-testdata.ClientLatencyDistributionSummaryPath = testdata.PathPrefix + "-" + testdata.ClientLatencyDistributionSummaryPath
+amc.ClientLatencyDistributionSummaryPath = amc.PathPrefix + "-" + amc.ClientLatencyDistributionSummaryPath
-testdata.ClientLatencyByKeyNumberPath = testdata.PathPrefix + "-" + testdata.ClientLatencyByKeyNumberPath
+amc.ClientLatencyByKeyNumberPath = amc.PathPrefix + "-" + amc.ClientLatencyByKeyNumberPath
-testdata.ServerDiskSpaceUsageSummaryPath = testdata.PathPrefix + "-" + testdata.ServerDiskSpaceUsageSummaryPath
+amc.ServerDiskSpaceUsageSummaryPath = amc.PathPrefix + "-" + amc.ServerDiskSpaceUsageSummaryPath
-testdata.ServerMemoryByKeyNumberPath = testdata.PathPrefix + "-" + testdata.ServerMemoryByKeyNumberPath
+amc.ServerMemoryByKeyNumberPath = amc.PathPrefix + "-" + amc.ServerMemoryByKeyNumberPath
-testdata.ServerReadBytesDeltaByKeyNumberPath = testdata.PathPrefix + "-" + testdata.ServerReadBytesDeltaByKeyNumberPath
+amc.ServerReadBytesDeltaByKeyNumberPath = amc.PathPrefix + "-" + amc.ServerReadBytesDeltaByKeyNumberPath
-testdata.ServerWriteBytesDeltaByKeyNumberPath = testdata.PathPrefix + "-" + testdata.ServerWriteBytesDeltaByKeyNumberPath
+amc.ServerWriteBytesDeltaByKeyNumberPath = amc.PathPrefix + "-" + amc.ServerWriteBytesDeltaByKeyNumberPath
-for i := range testdata.ServerSystemMetricsInterpolatedPathList {
+for i := range amc.ServerSystemMetricsInterpolatedPathList {
-testdata.ServerSystemMetricsInterpolatedPathList[i] = testdata.PathPrefix + "-" + testdata.ServerSystemMetricsInterpolatedPathList[i]
+amc.ServerSystemMetricsInterpolatedPathList[i] = amc.PathPrefix + "-" + amc.ServerSystemMetricsInterpolatedPathList[i]
 }
-testdata.AllAggregatedOutputPath = testdata.PathPrefix + "-" + testdata.AllAggregatedOutputPath
+amc.AllAggregatedOutputPath = amc.PathPrefix + "-" + amc.AllAggregatedOutputPath
 }

-cfg.DatabaseIDToTestData[databaseID] = testdata
+cfg.DatabaseIDToConfigAnalyzeMachineInitial[databaseID] = amc
 }

-for databaseID, group := range cfg.DatabaseIDToTestGroup {
+for databaseID, ctrl := range cfg.DatabaseIDToConfigClientMachineAgentControl {
-if databaseID != "etcdv3" && group.BenchmarkOptions.ConnectionNumber != group.BenchmarkOptions.ClientNumber {
+if databaseID != dbtesterpb.DatabaseID_etcd__v3_1.String() && ctrl.ConfigClientMachineBenchmarkOptions.ConnectionNumber != ctrl.ConfigClientMachineBenchmarkOptions.ClientNumber {
-return nil, fmt.Errorf("%q got connected %d != clients %d", databaseID, group.BenchmarkOptions.ConnectionNumber, group.BenchmarkOptions.ClientNumber)
+return nil, fmt.Errorf("%q got connected %d != clients %d", databaseID, ctrl.ConfigClientMachineBenchmarkOptions.ConnectionNumber, ctrl.ConfigClientMachineBenchmarkOptions.ClientNumber)
 }
 }

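For illustration only, not part of this commit: a quick sketch of the two path conventions visible in this file. Client-machine output paths are joined under PathPrefix as a directory, while analyze-machine input paths are prefixed with "<PathPrefix>-" as a file-name prefix. The analyze prefix value below is hypothetical; the client values come from the test data later in this diff.

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Client machine: PathPrefix is a directory.
        clientPrefix := "/home/gyuho"
        fmt.Println(filepath.Join(clientPrefix, "client-control.log")) // /home/gyuho/client-control.log

        // Analyze machine: PathPrefix is a file-name prefix joined with "-".
        analyzePrefix := "20170330/etcd-tip-go1.8.0" // hypothetical
        fmt.Println(analyzePrefix + "-" + "client-latency-by-key-number.csv")
        // 20170330/etcd-tip-go1.8.0-client-latency-by-key-number.csv
    }
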
-var (
+const (
-defaultEtcdSnapCount int64 = 100000
+defaultAgentPort int64 = 3500
+defaultEtcdClientPort int64 = 2379
+defaultZookeeperClientPort int64 = 2181
+defaultConsulClientPort int64 = 8500
+
+defaultEtcdSnapshotCount int64 = 100000
+defaultEtcdQuotaSizeBytes int64 = 8000000000
 defaultZookeeperSnapCount int64 = 100000
 defaultZookeeperTickTime int64 = 2000
 defaultZookeeperInitLimit int64 = 5
 defaultZookeeperSyncLimit int64 = 5
 defaultZookeeperMaxClientConnections int64 = 5000
 )
-if v, ok := cfg.DatabaseIDToTestGroup["etcdv3"]; ok {
-if v.Etcdv3.SnapCount == 0 {
+if v, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v2_3.String()]; ok {
-v.Etcdv3.SnapCount = defaultEtcdSnapCount
+if v.AgentPortToConnect == 0 {
+v.AgentPortToConnect = defaultAgentPort
 }
-cfg.DatabaseIDToTestGroup["etcdv3"] = v
+if v.DatabasePortToConnect == 0 {
+v.DatabasePortToConnect = defaultEtcdClientPort
 }
-if v, ok := cfg.DatabaseIDToTestGroup["etcdtip"]; ok {
+if v.Flag_Etcd_V2_3.SnapshotCount == 0 {
-if v.Etcdv3.SnapCount == 0 {
+v.Flag_Etcd_V2_3.SnapshotCount = defaultEtcdSnapshotCount
-v.Etcdv3.SnapCount = defaultEtcdSnapCount
 }
-cfg.DatabaseIDToTestGroup["etcdtip"] = v
+cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v2_3.String()] = v
 }
-if v, ok := cfg.DatabaseIDToTestGroup["zookeeper"]; ok {
+if v, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v3_1.String()]; ok {
-if v.Zookeeper.TickTime == 0 {
+if v.AgentPortToConnect == 0 {
-v.Zookeeper.TickTime = defaultZookeeperTickTime
+v.AgentPortToConnect = defaultAgentPort
 }
-if v.Zookeeper.InitLimit == 0 {
+if v.DatabasePortToConnect == 0 {
-v.Zookeeper.InitLimit = defaultZookeeperInitLimit
+v.DatabasePortToConnect = defaultEtcdClientPort
 }
-if v.Zookeeper.SyncLimit == 0 {
+if v.Flag_Etcd_V3_1.SnapshotCount == 0 {
-v.Zookeeper.SyncLimit = defaultZookeeperSyncLimit
+v.Flag_Etcd_V3_1.SnapshotCount = defaultEtcdSnapshotCount
 }
-if v.Zookeeper.SnapCount == 0 {
+if v.Flag_Etcd_V3_1.QuotaSizeBytes == 0 {
-v.Zookeeper.SnapCount = defaultZookeeperSnapCount
+v.Flag_Etcd_V3_1.QuotaSizeBytes = defaultEtcdQuotaSizeBytes
 }
-if v.Zookeeper.MaxClientConnections == 0 {
+cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v3_1.String()] = v
-v.Zookeeper.MaxClientConnections = defaultZookeeperMaxClientConnections
 }
-cfg.DatabaseIDToTestGroup["zookeeper"] = v
+if v, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v3_2.String()]; ok {
+if v.AgentPortToConnect == 0 {
+v.AgentPortToConnect = defaultAgentPort
+}
+if v.DatabasePortToConnect == 0 {
+v.DatabasePortToConnect = defaultEtcdClientPort
+}
+if v.Flag_Etcd_V3_2.SnapshotCount == 0 {
+v.Flag_Etcd_V3_2.SnapshotCount = defaultEtcdSnapshotCount
+}
+if v.Flag_Etcd_V3_2.QuotaSizeBytes == 0 {
+v.Flag_Etcd_V3_2.QuotaSizeBytes = defaultEtcdQuotaSizeBytes
+}
+cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v3_2.String()] = v
+}
+if v, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__tip.String()]; ok {
+if v.AgentPortToConnect == 0 {
+v.AgentPortToConnect = defaultAgentPort
+}
+if v.DatabasePortToConnect == 0 {
+v.DatabasePortToConnect = defaultEtcdClientPort
+}
+if v.Flag_Etcd_Tip.SnapshotCount == 0 {
+v.Flag_Etcd_Tip.SnapshotCount = defaultEtcdSnapshotCount
+}
+if v.Flag_Etcd_Tip.QuotaSizeBytes == 0 {
+v.Flag_Etcd_Tip.QuotaSizeBytes = defaultEtcdQuotaSizeBytes
+}
+cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__tip.String()] = v
 }

-if cfg.Control.GoogleCloudStorageKeyPath != "" && !analyze {
+if v, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_zookeeper__r3_4_9.String()]; ok {
-bts, err = ioutil.ReadFile(cfg.Control.GoogleCloudStorageKeyPath)
+if v.AgentPortToConnect == 0 {
+v.AgentPortToConnect = defaultAgentPort
+}
+if v.DatabasePortToConnect == 0 {
+v.DatabasePortToConnect = defaultZookeeperClientPort
+}
+
+v.Flag_Zookeeper_R3_4_9.ClientPort = v.DatabasePortToConnect
+if v.Flag_Zookeeper_R3_4_9.TickTime == 0 {
+v.Flag_Zookeeper_R3_4_9.TickTime = defaultZookeeperTickTime
+}
+if v.Flag_Zookeeper_R3_4_9.InitLimit == 0 {
+v.Flag_Zookeeper_R3_4_9.InitLimit = defaultZookeeperInitLimit
+}
+if v.Flag_Zookeeper_R3_4_9.SyncLimit == 0 {
+v.Flag_Zookeeper_R3_4_9.SyncLimit = defaultZookeeperSyncLimit
+}
+if v.Flag_Zookeeper_R3_4_9.SnapCount == 0 {
+v.Flag_Zookeeper_R3_4_9.SnapCount = defaultZookeeperSnapCount
+}
+if v.Flag_Zookeeper_R3_4_9.MaxClientConnections == 0 {
+v.Flag_Zookeeper_R3_4_9.MaxClientConnections = defaultZookeeperMaxClientConnections
+}
+cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_zookeeper__r3_4_9.String()] = v
+}
+if v, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_zookeeper__r3_5_2_alpha.String()]; ok {
+if v.AgentPortToConnect == 0 {
+v.AgentPortToConnect = defaultAgentPort
+}
+if v.DatabasePortToConnect == 0 {
+v.DatabasePortToConnect = defaultZookeeperClientPort
+}
+
+v.Flag_Zookeeper_R3_5_2Alpha.ClientPort = v.DatabasePortToConnect
+if v.Flag_Zookeeper_R3_5_2Alpha.TickTime == 0 {
+v.Flag_Zookeeper_R3_5_2Alpha.TickTime = defaultZookeeperTickTime
+}
+if v.Flag_Zookeeper_R3_5_2Alpha.TickTime == 0 {
+v.Flag_Zookeeper_R3_5_2Alpha.TickTime = defaultZookeeperTickTime
+}
+if v.Flag_Zookeeper_R3_5_2Alpha.InitLimit == 0 {
+v.Flag_Zookeeper_R3_5_2Alpha.InitLimit = defaultZookeeperInitLimit
+}
+if v.Flag_Zookeeper_R3_5_2Alpha.SyncLimit == 0 {
+v.Flag_Zookeeper_R3_5_2Alpha.SyncLimit = defaultZookeeperSyncLimit
+}
+if v.Flag_Zookeeper_R3_5_2Alpha.SnapCount == 0 {
+v.Flag_Zookeeper_R3_5_2Alpha.SnapCount = defaultZookeeperSnapCount
+}
+if v.Flag_Zookeeper_R3_5_2Alpha.MaxClientConnections == 0 {
+v.Flag_Zookeeper_R3_5_2Alpha.MaxClientConnections = defaultZookeeperMaxClientConnections
+}
+cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_zookeeper__r3_5_2_alpha.String()] = v
+}
+
+if v, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_consul__v0_7_5.String()]; ok {
+if v.AgentPortToConnect == 0 {
+v.AgentPortToConnect = defaultAgentPort
+}
+if v.DatabasePortToConnect == 0 {
+v.DatabasePortToConnect = defaultConsulClientPort
+}
+cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_consul__v0_7_5.String()] = v
+}
+if v, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_consul__v0_8_0.String()]; ok {
+if v.AgentPortToConnect == 0 {
+v.AgentPortToConnect = defaultAgentPort
+}
+if v.DatabasePortToConnect == 0 {
+v.DatabasePortToConnect = defaultConsulClientPort
+}
+cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_consul__v0_8_0.String()] = v
+}
+
+// need etcd configs since it's backed by etcd
+if _, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_zetcd__beta.String()]; ok {
+_, ok1 := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v2_3.String()]
+_, ok2 := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v3_1.String()]
+_, ok3 := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v3_2.String()]
+_, ok4 := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__tip.String()]
+if !ok1 && !ok2 && !ok3 && !ok4 {
+return nil, fmt.Errorf("got %q config, but no etcd config is given", dbtesterpb.DatabaseID_zetcd__beta.String())
+}
+}
+if _, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_cetcd__beta.String()]; ok {
+_, ok1 := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v2_3.String()]
+_, ok2 := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v3_1.String()]
+_, ok3 := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__v3_2.String()]
+_, ok4 := cfg.DatabaseIDToConfigClientMachineAgentControl[dbtesterpb.DatabaseID_etcd__tip.String()]
+if !ok1 && !ok2 && !ok3 && !ok4 {
+return nil, fmt.Errorf("got %q config, but no etcd config is given", dbtesterpb.DatabaseID_cetcd__beta.String())
+}
+}

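For illustration only, not part of this commit: the zetcd/cetcd validation above requires at least one etcd configuration because both proxies are backed by etcd. A sketch of the same rule factored into a standalone helper, with hypothetical names; the real code inlines the four lookups.

    package main

    import "fmt"

    // requireBackingEtcd mirrors the check above: a proxy database is only
    // valid when at least one etcd configuration is present.
    func requireBackingEtcd(configured map[string]bool, proxyID string, etcdIDs []string) error {
        if !configured[proxyID] {
            return nil // proxy not requested, nothing to check
        }
        for _, id := range etcdIDs {
            if configured[id] {
                return nil
            }
        }
        return fmt.Errorf("got %q config, but no etcd config is given", proxyID)
    }

    func main() {
        etcds := []string{"etcd__v2_3", "etcd__v3_1", "etcd__v3_2", "etcd__tip"}
        fmt.Println(requireBackingEtcd(map[string]bool{"zetcd__beta": true, "etcd__v3_1": true}, "zetcd__beta", etcds)) // <nil>
        fmt.Println(requireBackingEtcd(map[string]bool{"cetcd__beta": true}, "cetcd__beta", etcds))                    // error
    }
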
+if cfg.ConfigClientMachineInitial.GoogleCloudStorageKeyPath != "" && !analyze {
+bts, err = ioutil.ReadFile(cfg.ConfigClientMachineInitial.GoogleCloudStorageKeyPath)
 if err != nil {
 return nil, err
 }
-cfg.Control.GoogleCloudStorageKey = string(bts)
+cfg.ConfigClientMachineInitial.GoogleCloudStorageKey = string(bts)
 }

-for i := range cfg.PlotList {
+for i := range cfg.AnalyzePlotList {
-cfg.PlotList[i].OutputPathCSV = filepath.Join(cfg.PlotPathPrefix, cfg.PlotList[i].Column+".csv")
+cfg.AnalyzePlotList[i].OutputPathCSV = filepath.Join(cfg.AnalyzePlotPathPrefix, cfg.AnalyzePlotList[i].Column+".csv")
-cfg.PlotList[i].OutputPathList = make([]string, 2)
+cfg.AnalyzePlotList[i].OutputPathList = make([]string, 2)
-cfg.PlotList[i].OutputPathList[0] = filepath.Join(cfg.PlotPathPrefix, cfg.PlotList[i].Column+".svg")
+cfg.AnalyzePlotList[i].OutputPathList[0] = filepath.Join(cfg.AnalyzePlotPathPrefix, cfg.AnalyzePlotList[i].Column+".svg")
-cfg.PlotList[i].OutputPathList[1] = filepath.Join(cfg.PlotPathPrefix, cfg.PlotList[i].Column+".png")
+cfg.AnalyzePlotList[i].OutputPathList[1] = filepath.Join(cfg.AnalyzePlotPathPrefix, cfg.AnalyzePlotList[i].Column+".png")
 }

 return &cfg, nil
 }

-// MakeTag converts database scription to database tag.
+const maxEtcdQuotaSize = 8000000000
-func MakeTag(desc string) string {
-s := strings.ToLower(desc)
-s = strings.Replace(s, "go ", "go", -1)
-s = strings.Replace(s, "java ", "java", -1)
-s = strings.Replace(s, "(", "", -1)
-s = strings.Replace(s, ")", "", -1)
-return strings.Replace(s, " ", "-", -1)
-}

 // ToRequest converts configuration to 'dbtesterpb.Request'.
-func (cfg *Config) ToRequest(databaseID string, op dbtesterpb.Request_Operation, idx int) (req *dbtesterpb.Request, err error) {
+func (cfg *Config) ToRequest(databaseID string, op dbtesterpb.Operation, idx int) (req *dbtesterpb.Request, err error) {
-gcfg, ok := cfg.DatabaseIDToTestGroup[databaseID]
+gcfg, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
 if !ok {
-err = fmt.Errorf("%q is not defined", databaseID)
+err = fmt.Errorf("database ID %q is not defined", databaseID)
 return
 }
+did := dbtesterpb.DatabaseID(dbtesterpb.DatabaseID_value[databaseID])

 req = &dbtesterpb.Request{
 Operation: op,
-TriggerLogUpload: gcfg.BenchmarkSteps.Step4UploadLogs,
+TriggerLogUpload: gcfg.ConfigClientMachineBenchmarkSteps.Step4UploadLogs,
-DatabaseID: dbtesterpb.Request_Database(dbtesterpb.Request_Database_value[databaseID]),
+DatabaseID: did,
 DatabaseTag: gcfg.DatabaseTag,
 PeerIPsString: gcfg.PeerIPsString,
-IpIndex: uint32(idx),
+IPIndex: uint32(idx),
-CurrentClientNumber: gcfg.BenchmarkOptions.ClientNumber,
+CurrentClientNumber: gcfg.ConfigClientMachineBenchmarkOptions.ClientNumber,
-Control: &dbtesterpb.Request_Control{
+ConfigClientMachineInitial: &dbtesterpb.ConfigClientMachineInitial{
-GoogleCloudProjectName: cfg.Control.GoogleCloudProjectName,
+GoogleCloudProjectName: cfg.ConfigClientMachineInitial.GoogleCloudProjectName,
-GoogleCloudStorageKey: cfg.Control.GoogleCloudStorageKey,
+GoogleCloudStorageKey: cfg.ConfigClientMachineInitial.GoogleCloudStorageKey,
-GoogleCloudStorageBucketName: cfg.Control.GoogleCloudStorageBucketName,
+GoogleCloudStorageBucketName: cfg.ConfigClientMachineInitial.GoogleCloudStorageBucketName,
-GoogleCloudStorageSubDirectory: cfg.Control.GoogleCloudStorageSubDirectory,
+GoogleCloudStorageSubDirectory: cfg.ConfigClientMachineInitial.GoogleCloudStorageSubDirectory,
 },
 }

 switch req.DatabaseID {
-case dbtesterpb.Request_etcdv2:
+case dbtesterpb.DatabaseID_etcd__v2_3:
+req.Flag_Etcd_V2_3 = &dbtesterpb.Flag_Etcd_V2_3{
-case dbtesterpb.Request_etcdv3:
+SnapshotCount: gcfg.Flag_Etcd_V2_3.SnapshotCount,
-req.Etcdv3Config = &dbtesterpb.Request_Etcdv3{
+}
-SnapCount: gcfg.Etcdv3.SnapCount,
+case dbtesterpb.DatabaseID_etcd__v3_1:
-QuotaSizeBytes: gcfg.Etcdv3.QuotaSizeBytes,
+if gcfg.Flag_Etcd_V3_1.QuotaSizeBytes > maxEtcdQuotaSize {
+err = fmt.Errorf("maximum etcd quota is 8 GB (%d), got %d", maxEtcdQuotaSize, gcfg.Flag_Etcd_V3_1.QuotaSizeBytes)
+return
+}
+req.Flag_Etcd_V3_1 = &dbtesterpb.Flag_Etcd_V3_1{
+SnapshotCount: gcfg.Flag_Etcd_V3_1.SnapshotCount,
+QuotaSizeBytes: gcfg.Flag_Etcd_V3_1.QuotaSizeBytes,
+}
+case dbtesterpb.DatabaseID_etcd__v3_2:
+if gcfg.Flag_Etcd_V3_2.QuotaSizeBytes > maxEtcdQuotaSize {
+err = fmt.Errorf("maximum etcd quota is 8 GB (%d), got %d", maxEtcdQuotaSize, gcfg.Flag_Etcd_V3_2.QuotaSizeBytes)
+return
+}
+req.Flag_Etcd_V3_2 = &dbtesterpb.Flag_Etcd_V3_2{
+SnapshotCount: gcfg.Flag_Etcd_V3_2.SnapshotCount,
+QuotaSizeBytes: gcfg.Flag_Etcd_V3_2.QuotaSizeBytes,
+}
+case dbtesterpb.DatabaseID_etcd__tip:
+if gcfg.Flag_Etcd_Tip.QuotaSizeBytes > maxEtcdQuotaSize {
+err = fmt.Errorf("maximum etcd quota is 8 GB (%d), got %d", maxEtcdQuotaSize, gcfg.Flag_Etcd_Tip.QuotaSizeBytes)
+return
+}
+req.Flag_Etcd_Tip = &dbtesterpb.Flag_Etcd_Tip{
+SnapshotCount: gcfg.Flag_Etcd_Tip.SnapshotCount,
+QuotaSizeBytes: gcfg.Flag_Etcd_Tip.QuotaSizeBytes,
 }

-case dbtesterpb.Request_zookeeper:
+case dbtesterpb.DatabaseID_zookeeper__r3_4_9:
-req.ZookeeperConfig = &dbtesterpb.Request_Zookeeper{
+req.Flag_Zookeeper_R3_4_9 = &dbtesterpb.Flag_Zookeeper_R3_4_9{
+JavaDJuteMaxBuffer: gcfg.Flag_Zookeeper_R3_4_9.JavaDJuteMaxBuffer,
+JavaXms: gcfg.Flag_Zookeeper_R3_4_9.JavaXms,
+JavaXmx: gcfg.Flag_Zookeeper_R3_4_9.JavaXmx,
 MyID: uint32(idx + 1),
-TickTime: gcfg.Zookeeper.TickTime,
+ClientPort: gcfg.Flag_Zookeeper_R3_4_9.ClientPort,
-ClientPort: int64(gcfg.DatabasePortToConnect),
+TickTime: gcfg.Flag_Zookeeper_R3_4_9.TickTime,
-InitLimit: gcfg.Zookeeper.InitLimit,
+InitLimit: gcfg.Flag_Zookeeper_R3_4_9.InitLimit,
-SyncLimit: gcfg.Zookeeper.SyncLimit,
+SyncLimit: gcfg.Flag_Zookeeper_R3_4_9.SyncLimit,
-SnapCount: gcfg.Zookeeper.SnapCount,
+SnapCount: gcfg.Flag_Zookeeper_R3_4_9.SnapCount,
-MaxClientConnections: gcfg.Zookeeper.MaxClientConnections,
+MaxClientConnections: gcfg.Flag_Zookeeper_R3_4_9.MaxClientConnections,
+}
+case dbtesterpb.DatabaseID_zookeeper__r3_5_2_alpha:
+req.Flag_Zookeeper_R3_5_2Alpha = &dbtesterpb.Flag_Zookeeper_R3_5_2Alpha{
+JavaDJuteMaxBuffer: gcfg.Flag_Zookeeper_R3_5_2Alpha.JavaDJuteMaxBuffer,
+JavaXms: gcfg.Flag_Zookeeper_R3_5_2Alpha.JavaXms,
+JavaXmx: gcfg.Flag_Zookeeper_R3_5_2Alpha.JavaXmx,
+MyID: uint32(idx + 1),
+ClientPort: gcfg.Flag_Zookeeper_R3_5_2Alpha.ClientPort,
+TickTime: gcfg.Flag_Zookeeper_R3_5_2Alpha.TickTime,
+InitLimit: gcfg.Flag_Zookeeper_R3_5_2Alpha.InitLimit,
+SyncLimit: gcfg.Flag_Zookeeper_R3_5_2Alpha.SyncLimit,
+SnapCount: gcfg.Flag_Zookeeper_R3_5_2Alpha.SnapCount,
+MaxClientConnections: gcfg.Flag_Zookeeper_R3_5_2Alpha.MaxClientConnections,
 }

-case dbtesterpb.Request_consul:
+case dbtesterpb.DatabaseID_consul__v0_7_5:
-case dbtesterpb.Request_zetcd:
+case dbtesterpb.DatabaseID_consul__v0_8_0:
-case dbtesterpb.Request_cetcd:
+case dbtesterpb.DatabaseID_zetcd__beta:
+case dbtesterpb.DatabaseID_cetcd__beta:

 default:
 err = fmt.Errorf("unknown %v", req.DatabaseID)
-return
 }

 return

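For illustration only, not part of this commit: a hedged sketch of how a control client might use the ToRequest signature above to build one request per peer. The wrapper and loop are hypothetical; only identifiers visible in this diff (Config, ToRequest, PeerIPs, dbtesterpb.Operation, dbtesterpb.Request) are assumed.

    package example

    import (
        "github.com/coreos/dbtester"
        "github.com/coreos/dbtester/dbtesterpb"
    )

    // buildRequests constructs one dbtesterpb.Request per peer of a database,
    // passing the peer position as the IPIndex argument of ToRequest.
    func buildRequests(cfg *dbtester.Config, databaseID string, op dbtesterpb.Operation) ([]*dbtesterpb.Request, error) {
        gcfg := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
        reqs := make([]*dbtesterpb.Request, 0, len(gcfg.PeerIPs))
        for i := range gcfg.PeerIPs {
            req, err := cfg.ToRequest(databaseID, op, i)
            if err != nil {
                return nil, err
            }
            reqs = append(reqs, req)
        }
        return reqs, nil
    }
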
@@ -29,17 +29,18 @@ func TestConfig(t *testing.T) {
 expected := &Config{
 TestTitle: "Write 1M keys, 256-byte key, 1KB value value, clients 1 to 1,000",
 TestDescription: `- Google Cloud Compute Engine
-- 4 machines of 16 vCPUs + 30 GB Memory + 300 GB SSD (1 for client)
+- 4 machines of 16 vCPUs + 60 GB Memory + 300 GB SSD (1 for client)
 - Ubuntu 16.10
-- etcd v3.1 (Go 1.7.5)
+- etcd tip (Go 1.8.0)
-- Zookeeper r3.4.9
+- Zookeeper r3.5.2-alpha
 - Java 8
 - javac 1.8.0_121
 - Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
 - Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
-- Consul v0.7.4 (Go 1.7.5)
+- ` + "`/usr/bin/java -Djute.maxbuffer=33554432 -Xms50G -Xmx50G`" + `
+- Consul v0.7.5 (Go 1.8.0)
 `,
-Control: Control{
+ConfigClientMachineInitial: dbtesterpb.ConfigClientMachineInitial{
 PathPrefix: "/home/gyuho",
 LogPath: "/home/gyuho/client-control.log",
 ClientSystemMetricsPath: "/home/gyuho/client-system-metrics.csv",
@@ -54,25 +55,25 @@ func TestConfig(t *testing.T) {
 GoogleCloudStorageKeyPath: "config-dbtester-gcloud-key.json",
 GoogleCloudStorageKey: "test-key",
 GoogleCloudStorageBucketName: "dbtester-results",
-GoogleCloudStorageSubDirectory: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable",
+GoogleCloudStorageSubDirectory: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable",
 },
-AllDatabaseIDList: []string{"etcdv3", "zookeeper", "consul"},
+AllDatabaseIDList: []string{"etcd__tip", "zookeeper__r3_5_2_alpha", "consul__v0_7_5"},
-DatabaseIDToTestGroup: map[string]TestGroup{
+DatabaseIDToConfigClientMachineAgentControl: map[string]dbtesterpb.ConfigClientMachineAgentControl{
-"etcdv3": {
+"etcd__tip": {
-DatabaseID: "etcdv3",
+DatabaseID: "etcd__tip",
-DatabaseTag: "etcd-v3.1-go1.7.5",
+DatabaseTag: "etcd-tip-go1.8.0",
-DatabaseDescription: "etcd v3.1 (Go 1.7.5)",
+DatabaseDescription: "etcd tip (Go 1.8.0)",
-PeerIPs: []string{"10.240.0.20", "10.240.0.21", "10.240.0.22"},
+PeerIPs: []string{"10.240.0.7", "10.240.0.8", "10.240.0.12"},
-PeerIPsString: "10.240.0.20___10.240.0.21___10.240.0.22",
+PeerIPsString: "10.240.0.7___10.240.0.8___10.240.0.12",
 DatabasePortToConnect: 2379,
-DatabaseEndpoints: []string{"10.240.0.20:2379", "10.240.0.21:2379", "10.240.0.22:2379"},
+DatabaseEndpoints: []string{"10.240.0.7:2379", "10.240.0.8:2379", "10.240.0.12:2379"},
 AgentPortToConnect: 3500,
-AgentEndpoints: []string{"10.240.0.20:3500", "10.240.0.21:3500", "10.240.0.22:3500"},
+AgentEndpoints: []string{"10.240.0.7:3500", "10.240.0.8:3500", "10.240.0.12:3500"},
-Etcdv3: Etcdv3{
+Flag_Etcd_Tip: &dbtesterpb.Flag_Etcd_Tip{
-SnapCount: 100000,
+SnapshotCount: 100000,
 QuotaSizeBytes: 8000000000,
 },
-BenchmarkOptions: BenchmarkOptions{
+ConfigClientMachineBenchmarkOptions: &dbtesterpb.ConfigClientMachineBenchmarkOptions{
 Type: "write",
 RequestNumber: 1000000,
 ConnectionNumber: 0,
@@ -84,31 +85,35 @@ func TestConfig(t *testing.T) {
 ValueSizeBytes: 1024,
 StaleRead: false,
 },
-BenchmarkSteps: BenchmarkSteps{
+ConfigClientMachineBenchmarkSteps: &dbtesterpb.ConfigClientMachineBenchmarkSteps{
 Step1StartDatabase: true,
 Step2StressDatabase: true,
 Step3StopDatabase: true,
 Step4UploadLogs: true,
 },
 },
-"zookeeper": {
+"zookeeper__r3_5_2_alpha": {
-DatabaseID: "zookeeper",
+DatabaseID: "zookeeper__r3_5_2_alpha",
-DatabaseTag: "zookeeper-r3.4.9-java8",
+DatabaseTag: "zookeeper-r3.5.2-alpha-java8",
-DatabaseDescription: "Zookeeper r3.4.9 (Java 8)",
+DatabaseDescription: "Zookeeper r3.5.2-alpha (Java 8)",
-PeerIPs: []string{"10.240.0.25", "10.240.0.27", "10.240.0.28"},
+PeerIPs: []string{"10.240.0.21", "10.240.0.22", "10.240.0.23"},
-PeerIPsString: "10.240.0.25___10.240.0.27___10.240.0.28",
+PeerIPsString: "10.240.0.21___10.240.0.22___10.240.0.23",
 DatabasePortToConnect: 2181,
-DatabaseEndpoints: []string{"10.240.0.25:2181", "10.240.0.27:2181", "10.240.0.28:2181"},
+DatabaseEndpoints: []string{"10.240.0.21:2181", "10.240.0.22:2181", "10.240.0.23:2181"},
 AgentPortToConnect: 3500,
-AgentEndpoints: []string{"10.240.0.25:3500", "10.240.0.27:3500", "10.240.0.28:3500"},
+AgentEndpoints: []string{"10.240.0.21:3500", "10.240.0.22:3500", "10.240.0.23:3500"},
-Zookeeper: Zookeeper{
+Flag_Zookeeper_R3_5_2Alpha: &dbtesterpb.Flag_Zookeeper_R3_5_2Alpha{
+JavaDJuteMaxBuffer: 33554432,
+JavaXms: "50G",
+JavaXmx: "50G",
+ClientPort: 2181,
 TickTime: 2000,
 InitLimit: 5,
 SyncLimit: 5,
 SnapCount: 100000,
 MaxClientConnections: 5000,
 },
-BenchmarkOptions: BenchmarkOptions{
+ConfigClientMachineBenchmarkOptions: &dbtesterpb.ConfigClientMachineBenchmarkOptions{
 Type: "write",
 RequestNumber: 1000000,
 ConnectionNumber: 0,
@@ -120,24 +125,24 @@ func TestConfig(t *testing.T) {
 ValueSizeBytes: 1024,
 StaleRead: false,
 },
-BenchmarkSteps: BenchmarkSteps{
+ConfigClientMachineBenchmarkSteps: &dbtesterpb.ConfigClientMachineBenchmarkSteps{
 Step1StartDatabase: true,
 Step2StressDatabase: true,
 Step3StopDatabase: true,
 Step4UploadLogs: true,
 },
 },
-"consul": {
+"consul__v0_7_5": {
-DatabaseID: "consul",
+DatabaseID: "consul__v0_7_5",
-DatabaseTag: "consul-v0.7.4-go1.7.5",
+DatabaseTag: "consul-v0.7.5-go1.8.0",
-DatabaseDescription: "Consul v0.7.4 (Go 1.7.5)",
+DatabaseDescription: "Consul v0.7.5 (Go 1.8.0)",
-PeerIPs: []string{"10.240.0.30", "10.240.0.31", "10.240.0.33"},
+PeerIPs: []string{"10.240.0.27", "10.240.0.28", "10.240.0.29"},
-PeerIPsString: "10.240.0.30___10.240.0.31___10.240.0.33",
+PeerIPsString: "10.240.0.27___10.240.0.28___10.240.0.29",
 DatabasePortToConnect: 8500,
-DatabaseEndpoints: []string{"10.240.0.30:8500", "10.240.0.31:8500", "10.240.0.33:8500"},
+DatabaseEndpoints: []string{"10.240.0.27:8500", "10.240.0.28:8500", "10.240.0.29:8500"},
 AgentPortToConnect: 3500,
-AgentEndpoints: []string{"10.240.0.30:3500", "10.240.0.31:3500", "10.240.0.33:3500"},
+AgentEndpoints: []string{"10.240.0.27:3500", "10.240.0.28:3500", "10.240.0.29:3500"},
-BenchmarkOptions: BenchmarkOptions{
+ConfigClientMachineBenchmarkOptions: &dbtesterpb.ConfigClientMachineBenchmarkOptions{
 Type: "write",
 RequestNumber: 1000000,
 ConnectionNumber: 0,
@@ -149,7 +154,7 @@ func TestConfig(t *testing.T) {
 ValueSizeBytes: 1024,
 StaleRead: false,
 },
-BenchmarkSteps: BenchmarkSteps{
+ConfigClientMachineBenchmarkSteps: &dbtesterpb.ConfigClientMachineBenchmarkSteps{
 Step1StartDatabase: true,
 Step2StressDatabase: true,
 Step3StopDatabase: true,
@@ -157,331 +162,331 @@ func TestConfig(t *testing.T) {
 },
 },
 },
-DatabaseIDToTestData: map[string]TestData{
+DatabaseIDToConfigAnalyzeMachineInitial: map[string]dbtesterpb.ConfigAnalyzeMachineInitial{
-"etcdv3": {
+"etcd__tip": {
-DatabaseID: "etcdv3",
+DatabaseID: "etcd__tip",
-DatabaseTag: "etcd-v3.1-go1.7.5",
+DatabaseTag: "etcd-tip-go1.8.0",
-DatabaseDescription: "etcd v3.1 (Go 1.7.5)",
+DatabaseDescription: "etcd tip (Go 1.8.0)",
-PathPrefix: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5",
+PathPrefix: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0",

-ClientSystemMetricsInterpolatedPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-client-system-metrics-interpolated.csv",
+ClientSystemMetricsInterpolatedPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-client-system-metrics-interpolated.csv",
-ClientLatencyThroughputTimeseriesPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-client-latency-throughput-timeseries.csv",
+ClientLatencyThroughputTimeseriesPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-client-latency-throughput-timeseries.csv",
-ClientLatencyDistributionAllPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-client-latency-distribution-all.csv",
+ClientLatencyDistributionAllPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-client-latency-distribution-all.csv",
-ClientLatencyDistributionPercentilePath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-client-latency-distribution-percentile.csv",
+ClientLatencyDistributionPercentilePath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-client-latency-distribution-percentile.csv",
-ClientLatencyDistributionSummaryPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-client-latency-distribution-summary.csv",
+ClientLatencyDistributionSummaryPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-client-latency-distribution-summary.csv",
-ClientLatencyByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-client-latency-by-key-number.csv",
+ClientLatencyByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-client-latency-by-key-number.csv",
-ServerMemoryByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-server-memory-by-key-number.csv",
+ServerMemoryByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-server-memory-by-key-number.csv",
-ServerReadBytesDeltaByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-server-read-bytes-delta-by-key-number.csv",
+ServerReadBytesDeltaByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-server-read-bytes-delta-by-key-number.csv",
-ServerWriteBytesDeltaByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-server-write-bytes-delta-by-key-number.csv",
+ServerWriteBytesDeltaByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-server-write-bytes-delta-by-key-number.csv",
-ServerDiskSpaceUsageSummaryPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-server-disk-space-usage-summary.csv",
+ServerDiskSpaceUsageSummaryPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-server-disk-space-usage-summary.csv",
 ServerSystemMetricsInterpolatedPathList: []string{
-"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-1-server-system-metrics-interpolated.csv",
+"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-1-server-system-metrics-interpolated.csv",
-"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-2-server-system-metrics-interpolated.csv",
+"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-2-server-system-metrics-interpolated.csv",
-"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-3-server-system-metrics-interpolated.csv",
+"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-3-server-system-metrics-interpolated.csv",
 },
-AllAggregatedOutputPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5-all-aggregated.csv",
+AllAggregatedOutputPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0-all-aggregated.csv",
 },
"zookeeper": {
|
"zookeeper__r3_5_2_alpha": {
|
||||||
DatabaseID: "zookeeper",
|
DatabaseID: "zookeeper__r3_5_2_alpha",
|
||||||
DatabaseTag: "zookeeper-r3.4.9-java8",
|
DatabaseTag: "zookeeper-r3.5.2-alpha-java8",
|
||||||
DatabaseDescription: "Zookeeper r3.4.9 (Java 8)",
|
DatabaseDescription: "Zookeeper r3.5.2-alpha (Java 8)",
|
||||||
PathPrefix: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8",
|
PathPrefix: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8",
|
||||||
|
|
||||||
ClientSystemMetricsInterpolatedPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-client-system-metrics-interpolated.csv",
|
ClientSystemMetricsInterpolatedPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-client-system-metrics-interpolated.csv",
|
||||||
ClientLatencyThroughputTimeseriesPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-client-latency-throughput-timeseries.csv",
|
ClientLatencyThroughputTimeseriesPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-client-latency-throughput-timeseries.csv",
|
||||||
ClientLatencyDistributionAllPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-client-latency-distribution-all.csv",
|
ClientLatencyDistributionAllPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-client-latency-distribution-all.csv",
|
||||||
ClientLatencyDistributionPercentilePath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-client-latency-distribution-percentile.csv",
|
ClientLatencyDistributionPercentilePath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-client-latency-distribution-percentile.csv",
|
||||||
ClientLatencyDistributionSummaryPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-client-latency-distribution-summary.csv",
|
ClientLatencyDistributionSummaryPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-client-latency-distribution-summary.csv",
|
||||||
ClientLatencyByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-client-latency-by-key-number.csv",
|
ClientLatencyByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-client-latency-by-key-number.csv",
|
||||||
ServerMemoryByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-server-memory-by-key-number.csv",
|
ServerMemoryByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-server-memory-by-key-number.csv",
|
||||||
ServerReadBytesDeltaByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-server-read-bytes-delta-by-key-number.csv",
|
ServerReadBytesDeltaByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-server-read-bytes-delta-by-key-number.csv",
|
||||||
ServerWriteBytesDeltaByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-server-write-bytes-delta-by-key-number.csv",
|
ServerWriteBytesDeltaByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-server-write-bytes-delta-by-key-number.csv",
|
||||||
ServerDiskSpaceUsageSummaryPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-server-disk-space-usage-summary.csv",
|
ServerDiskSpaceUsageSummaryPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-server-disk-space-usage-summary.csv",
|
||||||
ServerSystemMetricsInterpolatedPathList: []string{
|
ServerSystemMetricsInterpolatedPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-1-server-system-metrics-interpolated.csv",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-1-server-system-metrics-interpolated.csv",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-2-server-system-metrics-interpolated.csv",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-2-server-system-metrics-interpolated.csv",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-3-server-system-metrics-interpolated.csv",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-3-server-system-metrics-interpolated.csv",
|
||||||
},
|
},
|
||||||
AllAggregatedOutputPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-all-aggregated.csv",
|
AllAggregatedOutputPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8-all-aggregated.csv",
|
||||||
},
|
},
|
||||||
"consul": {
|
"consul__v0_7_5": {
|
||||||
DatabaseID: "consul",
|
DatabaseID: "consul__v0_7_5",
|
||||||
DatabaseTag: "consul-v0.7.4-go1.7.5",
|
DatabaseTag: "consul-v0.7.5-go1.8.0",
|
||||||
DatabaseDescription: "Consul v0.7.4 (Go 1.7.5)",
|
DatabaseDescription: "Consul v0.7.5 (Go 1.8.0)",
|
||||||
PathPrefix: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5",
|
PathPrefix: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0",
|
||||||
|
|
||||||
ClientSystemMetricsInterpolatedPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-client-system-metrics-interpolated.csv",
|
ClientSystemMetricsInterpolatedPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-client-system-metrics-interpolated.csv",
|
||||||
ClientLatencyThroughputTimeseriesPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-client-latency-throughput-timeseries.csv",
|
ClientLatencyThroughputTimeseriesPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-client-latency-throughput-timeseries.csv",
|
||||||
ClientLatencyDistributionAllPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-client-latency-distribution-all.csv",
|
ClientLatencyDistributionAllPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-client-latency-distribution-all.csv",
|
||||||
ClientLatencyDistributionPercentilePath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-client-latency-distribution-percentile.csv",
|
ClientLatencyDistributionPercentilePath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-client-latency-distribution-percentile.csv",
|
||||||
ClientLatencyDistributionSummaryPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-client-latency-distribution-summary.csv",
|
ClientLatencyDistributionSummaryPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-client-latency-distribution-summary.csv",
|
||||||
ClientLatencyByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-client-latency-by-key-number.csv",
|
ClientLatencyByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-client-latency-by-key-number.csv",
|
||||||
ServerMemoryByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-server-memory-by-key-number.csv",
|
ServerMemoryByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-server-memory-by-key-number.csv",
|
||||||
ServerReadBytesDeltaByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-server-read-bytes-delta-by-key-number.csv",
|
ServerReadBytesDeltaByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-server-read-bytes-delta-by-key-number.csv",
|
||||||
ServerWriteBytesDeltaByKeyNumberPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-server-write-bytes-delta-by-key-number.csv",
|
ServerWriteBytesDeltaByKeyNumberPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-server-write-bytes-delta-by-key-number.csv",
|
||||||
ServerDiskSpaceUsageSummaryPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-server-disk-space-usage-summary.csv",
|
ServerDiskSpaceUsageSummaryPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-server-disk-space-usage-summary.csv",
|
||||||
ServerSystemMetricsInterpolatedPathList: []string{
|
ServerSystemMetricsInterpolatedPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-1-server-system-metrics-interpolated.csv",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-1-server-system-metrics-interpolated.csv",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-2-server-system-metrics-interpolated.csv",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-2-server-system-metrics-interpolated.csv",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-3-server-system-metrics-interpolated.csv",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-3-server-system-metrics-interpolated.csv",
|
||||||
},
|
},
|
||||||
AllAggregatedOutputPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5-all-aggregated.csv",
|
AllAggregatedOutputPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0-all-aggregated.csv",
|
||||||
},
|
},
|
||||||
 },
-Analyze: Analyze{
+ConfigAnalyzeMachineAllAggregatedOutput: dbtesterpb.ConfigAnalyzeMachineAllAggregatedOutput{
-AllAggregatedOutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.csv",
+AllAggregatedOutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.csv",
-AllAggregatedOutputPathTXT: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.txt",
+AllAggregatedOutputPathTXT: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.txt",
 },
-PlotPathPrefix: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable",
+AnalyzePlotPathPrefix: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable",
-PlotList: []Plot{
+AnalyzePlotList: []dbtesterpb.ConfigAnalyzeMachinePlot{
{
|
{
|
||||||
Column: "AVG-LATENCY-MS",
|
Column: "AVG-LATENCY-MS",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Latency(millisecond)",
|
YAxis: "Latency(millisecond)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-THROUGHPUT",
|
Column: "AVG-THROUGHPUT",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Throughput(Requests/Second)",
|
YAxis: "Throughput(Requests/Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-VOLUNTARY-CTXT-SWITCHES",
|
Column: "AVG-VOLUNTARY-CTXT-SWITCHES",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Voluntary Context Switches",
|
YAxis: "Voluntary Context Switches",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-NON-VOLUNTARY-CTXT-SWITCHES",
|
Column: "AVG-NON-VOLUNTARY-CTXT-SWITCHES",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Non-voluntary Context Switches",
|
YAxis: "Non-voluntary Context Switches",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-CPU",
|
Column: "AVG-CPU",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "CPU(%)",
|
YAxis: "CPU(%)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "MAX-CPU",
|
Column: "MAX-CPU",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "CPU(%)",
|
YAxis: "CPU(%)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-VMRSS-MB",
|
Column: "AVG-VMRSS-MB",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Memory(MB)",
|
YAxis: "Memory(MB)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-READS-COMPLETED-DELTA",
|
Column: "AVG-READS-COMPLETED-DELTA",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Disk Reads (Delta per Second)",
|
YAxis: "Disk Reads (Delta per Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-SECTORS-READ-DELTA",
|
Column: "AVG-SECTORS-READ-DELTA",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Sectors Read (Delta per Second)",
|
YAxis: "Sectors Read (Delta per Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-WRITES-COMPLETED-DELTA",
|
Column: "AVG-WRITES-COMPLETED-DELTA",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Disk Writes (Delta per Second)",
|
YAxis: "Disk Writes (Delta per Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-SECTORS-WRITTEN-DELTA",
|
Column: "AVG-SECTORS-WRITTEN-DELTA",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Sectors Written (Delta per Second)",
|
YAxis: "Sectors Written (Delta per Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-READ-BYTES-DELTA",
|
Column: "AVG-READ-BYTES-DELTA",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Read Bytes (Delta per Second)",
|
YAxis: "Read Bytes (Delta per Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-WRITE-BYTES-DELTA",
|
Column: "AVG-WRITE-BYTES-DELTA",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Write Bytes (Delta per Second)",
|
YAxis: "Write Bytes (Delta per Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-RECEIVE-BYTES-NUM-DELTA",
|
Column: "AVG-RECEIVE-BYTES-NUM-DELTA",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Network Receive(bytes) (Delta per Second)",
|
YAxis: "Network Receive(bytes) (Delta per Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Column: "AVG-TRANSMIT-BYTES-NUM-DELTA",
|
Column: "AVG-TRANSMIT-BYTES-NUM-DELTA",
|
||||||
XAxis: "Second",
|
XAxis: "Second",
|
||||||
YAxis: "Network Transmit(bytes) (Delta per Second)",
|
YAxis: "Network Transmit(bytes) (Delta per Second)",
|
||||||
OutputPathCSV: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.csv",
|
OutputPathCSV: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.csv",
|
||||||
OutputPathList: []string{
|
OutputPathList: []string{
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.svg",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.svg",
|
||||||
"2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.png",
|
"2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.png",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
-README: README{
+ConfigAnalyzeMachineREADME: dbtesterpb.ConfigAnalyzeMachineREADME{
-OutputPath: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/README.md",
+OutputPath: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/README.md",

-Images: []Image{
+Images: []*dbtesterpb.ConfigAnalyzeMachineImage{
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Title: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA",
|
Title: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA",
|
||||||
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.svg",
|
Path: "https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.svg",
|
||||||
Type: "remote",
|
Type: "remote",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
@@ -491,26 +496,26 @@ func TestConfig(t *testing.T) {
 t.Fatalf("configuration expected\n%+v\n, got\n%+v\n", expected, cfg)
 }

-req1, err := cfg.ToRequest("etcdv3", dbtesterpb.Request_Start, 0)
+req1, err := cfg.ToRequest("etcd__tip", dbtesterpb.Operation_Start, 0)
 if err != nil {
 t.Fatal(err)
 }
 expected1 := &dbtesterpb.Request{
-Operation: dbtesterpb.Request_Start,
+Operation: dbtesterpb.Operation_Start,
 TriggerLogUpload: true,
-DatabaseID: dbtesterpb.Request_etcdv3,
+DatabaseID: dbtesterpb.DatabaseID_etcd__tip,
-DatabaseTag: "etcd-v3.1-go1.7.5",
+DatabaseTag: "etcd-tip-go1.8.0",
-PeerIPsString: "10.240.0.20___10.240.0.21___10.240.0.22",
+PeerIPsString: "10.240.0.7___10.240.0.8___10.240.0.12",
-IpIndex: 0,
+IPIndex: 0,
 CurrentClientNumber: 0,
-Control: &dbtesterpb.Request_Control{
+ConfigClientMachineInitial: &dbtesterpb.ConfigClientMachineInitial{
 GoogleCloudProjectName: "etcd-development",
 GoogleCloudStorageKey: "test-key",
 GoogleCloudStorageBucketName: "dbtester-results",
-GoogleCloudStorageSubDirectory: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable",
+GoogleCloudStorageSubDirectory: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable",
 },
-Etcdv3Config: &dbtesterpb.Request_Etcdv3{
+Flag_Etcd_Tip: &dbtesterpb.Flag_Etcd_Tip{
-SnapCount: 100000,
+SnapshotCount: 100000,
 QuotaSizeBytes: 8000000000,
 },
 }
@@ -518,28 +523,31 @@ func TestConfig(t *testing.T) {
 t.Fatalf("configuration expected\n%+v\n, got\n%+v\n", expected1, req1)
 }

-req2, err := cfg.ToRequest("zookeeper", dbtesterpb.Request_Start, 2)
+req2, err := cfg.ToRequest("zookeeper__r3_5_2_alpha", dbtesterpb.Operation_Start, 2)
 if err != nil {
 t.Fatal(err)
 }
 expected2 := &dbtesterpb.Request{
-Operation: dbtesterpb.Request_Start,
+Operation: dbtesterpb.Operation_Start,
 TriggerLogUpload: true,
-DatabaseID: dbtesterpb.Request_zookeeper,
+DatabaseID: dbtesterpb.DatabaseID_zookeeper__r3_5_2_alpha,
-DatabaseTag: "zookeeper-r3.4.9-java8",
+DatabaseTag: "zookeeper-r3.5.2-alpha-java8",
-PeerIPsString: "10.240.0.25___10.240.0.27___10.240.0.28",
+PeerIPsString: "10.240.0.21___10.240.0.22___10.240.0.23",
-IpIndex: 2,
+IPIndex: 2,
 CurrentClientNumber: 0,
-Control: &dbtesterpb.Request_Control{
+ConfigClientMachineInitial: &dbtesterpb.ConfigClientMachineInitial{
 GoogleCloudProjectName: "etcd-development",
 GoogleCloudStorageKey: "test-key",
 GoogleCloudStorageBucketName: "dbtester-results",
-GoogleCloudStorageSubDirectory: "2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable",
+GoogleCloudStorageSubDirectory: "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable",
 },
-ZookeeperConfig: &dbtesterpb.Request_Zookeeper{
+Flag_Zookeeper_R3_5_2Alpha: &dbtesterpb.Flag_Zookeeper_R3_5_2Alpha{
+JavaDJuteMaxBuffer: 33554432,
+JavaXms: "50G",
+JavaXmx: "50G",
 MyID: 3,
-TickTime: 2000,
 ClientPort: 2181,
+TickTime: 2000,
 InitLimit: 5,
 SyncLimit: 5,
 SnapCount: 100000,
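The YAML hunks below carry the same renames on the configuration-file side: control becomes config_client_machine_initial, and the database keys move to the versioned IDs (etcd__tip, zookeeper__r3_5_2_alpha, consul__v0_7_5). As a rough illustration of how such a keyed block can be decoded in Go, here is a minimal sketch using gopkg.in/yaml.v2; the struct shape and its yaml tags are assumptions made for this sketch only and are not the repository's actual config loader.

// Illustrative only: decodes a fragment shaped like the YAML below into a
// small struct. The struct and its yaml tags are assumed for this sketch.
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type group struct {
	DatabaseDescription string   `yaml:"database_description"`
	PeerIPs             []string `yaml:"peer_ips"`
	DatabasePort        int      `yaml:"database_port_to_connect"`
	AgentPort           int      `yaml:"agent_port_to_connect"`
}

type config struct {
	AllDatabaseIDList []string         `yaml:"all_database_id_list"`
	Groups            map[string]group `yaml:"datatbase_id_to_config_client_machine_agent_control"`
}

const doc = `
all_database_id_list: [etcd__tip, zookeeper__r3_5_2_alpha, consul__v0_7_5]
datatbase_id_to_config_client_machine_agent_control:
  etcd__tip:
    database_description: etcd tip (Go 1.8.0)
    peer_ips: [10.240.0.7, 10.240.0.8, 10.240.0.12]
    database_port_to_connect: 2379
    agent_port_to_connect: 3500
`

func main() {
	var c config
	if err := yaml.Unmarshal([]byte(doc), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Groups["etcd__tip"].PeerIPs) // [10.240.0.7 10.240.0.8 10.240.0.12]
}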
@@ -1,18 +1,19 @@
 test_title: Write 1M keys, 256-byte key, 1KB value value, clients 1 to 1,000
 test_description: |
 - Google Cloud Compute Engine
-- 4 machines of 16 vCPUs + 30 GB Memory + 300 GB SSD (1 for client)
+- 4 machines of 16 vCPUs + 60 GB Memory + 300 GB SSD (1 for client)
 - Ubuntu 16.10
-- etcd v3.1 (Go 1.7.5)
+- etcd tip (Go 1.8.0)
-- Zookeeper r3.4.9
+- Zookeeper r3.5.2-alpha
 - Java 8
 - javac 1.8.0_121
 - Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
 - Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
-- Consul v0.7.4 (Go 1.7.5)
+- `/usr/bin/java -Djute.maxbuffer=33554432 -Xms50G -Xmx50G`
+- Consul v0.7.5 (Go 1.8.0)

 # common control options for all client machines
-control:
+config_client_machine_initial:
 # if not empty, all test data paths are prefixed
 path_prefix: /home/gyuho
 log_path: client-control.log
@@ -28,24 +29,23 @@ control:
 # (optional) to automatically upload all files in client machine
 google_cloud_project_name: etcd-development
 # set this in 'control' machine, to automate log uploading in remote 'agent' machines
-# google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
 google_cloud_storage_key_path: config-dbtester-gcloud-key.json
 google_cloud_storage_bucket_name: dbtester-results
-google_cloud_storage_sub_directory: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable
+google_cloud_storage_sub_directory: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable

-all_database_id_list: [etcdv3, zookeeper, consul]
+all_database_id_list: [etcd__tip, zookeeper__r3_5_2_alpha, consul__v0_7_5]

-datatbase_id_to_test_group:
+datatbase_id_to_config_client_machine_agent_control:
-etcdv3:
+etcd__tip:
-database_description: etcd v3.1 (Go 1.7.5)
+database_description: etcd tip (Go 1.8.0)
 peer_ips:
-- 10.240.0.20
+- 10.240.0.7
-- 10.240.0.21
+- 10.240.0.8
-- 10.240.0.22
+- 10.240.0.12
 database_port_to_connect: 2379
 agent_port_to_connect: 3500

-etcdv3:
+etcd__tip:
 # --snapshot-count
 snap_count: 100000
 # --quota-backend-bytes; 8 GB
@ -75,17 +75,25 @@ datatbase_id_to_test_group:
|
||||||
step3_stop_database: true
|
step3_stop_database: true
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
database_description: Zookeeper r3.4.9 (Java 8)
|
database_description: Zookeeper r3.5.2-alpha (Java 8)
|
||||||
peer_ips:
|
peer_ips:
|
||||||
- 10.240.0.25
|
- 10.240.0.21
|
||||||
- 10.240.0.27
|
- 10.240.0.22
|
||||||
- 10.240.0.28
|
- 10.240.0.23
|
||||||
database_port_to_connect: 2181
|
database_port_to_connect: 2181
|
||||||
agent_port_to_connect: 3500
|
agent_port_to_connect: 3500
|
||||||
|
|
||||||
# http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
|
# http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
|
# maximum size, in bytes, of a request or response
|
||||||
|
# set it to 33 MB
|
||||||
|
java_d_jute_max_buffer: 33554432
|
||||||
|
|
||||||
|
# JVM min,max heap size
|
||||||
|
java_xms: 50G
|
||||||
|
java_xmx: 50G
|
||||||
|
|
||||||
# tickTime; the length of a single tick, which is the basic time unit used by ZooKeeper,
|
# tickTime; the length of a single tick, which is the basic time unit used by ZooKeeper,
|
||||||
# as measured in milliseconds.
|
# as measured in milliseconds.
|
||||||
tick_time: 2000
|
tick_time: 2000
|
||||||
|
|
@ -131,12 +139,12 @@ datatbase_id_to_test_group:
|
||||||
step3_stop_database: true
|
step3_stop_database: true
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
consul:
|
consul__v0_7_5:
|
||||||
database_description: Consul v0.7.4 (Go 1.7.5)
|
database_description: Consul v0.7.5 (Go 1.8.0)
|
||||||
peer_ips:
|
peer_ips:
|
||||||
- 10.240.0.30
|
- 10.240.0.27
|
||||||
- 10.240.0.31
|
- 10.240.0.28
|
||||||
- 10.240.0.33
|
- 10.240.0.29
|
||||||
database_port_to_connect: 8500
|
database_port_to_connect: 8500
|
||||||
agent_port_to_connect: 3500
|
agent_port_to_connect: 3500
|
||||||
|
|
||||||
|
|
@ -165,10 +173,10 @@ datatbase_id_to_test_group:
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
|
|
||||||
datatbase_id_to_test_data:
|
datatbase_id_to_config_analyze_machine_initial:
|
||||||
etcdv3:
|
etcd__tip:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@ -185,9 +193,9 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@ -204,9 +212,9 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
consul:
|
consul__v0_7_5:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@ -223,12 +231,12 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
analyze:
|
analyze_all_aggregated_output:
|
||||||
all_aggregated_output_path_csv: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.csv
|
all_aggregated_output_path_csv: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.csv
|
||||||
all_aggregated_output_path_txt: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.txt
|
all_aggregated_output_path_txt: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.txt
|
||||||
|
|
||||||
plot_path_prefix: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable
|
analyze_plot_path_prefix: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable
|
||||||
plot_list:
|
analyze_plot_list:
|
||||||
- column: AVG-LATENCY-MS
|
- column: AVG-LATENCY-MS
|
||||||
x_axis: Second
|
x_axis: Second
|
||||||
y_axis: Latency(millisecond)
|
y_axis: Latency(millisecond)
|
||||||
|
|
@ -289,82 +297,82 @@ plot_list:
|
||||||
x_axis: Second
|
x_axis: Second
|
||||||
y_axis: Network Transmit(bytes) (Delta per Second)
|
y_axis: Network Transmit(bytes) (Delta per Second)
|
||||||
|
|
||||||
readme:
|
analyze_readme:
|
||||||
output_path: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/README.md
|
output_path: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/README.md
|
||||||
|
|
||||||
images:
|
images:
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
|
||||||
|
|
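The renamed top-level keys above (config_client_machine_initial, datatbase_id_to_config_client_machine_agent_control, and so on) are read on the client machine by dbtester.ReadConfig, as the command diff further below shows. As a rough, hand-written illustration only — the mirror struct here stands in for the generated dbtesterpb types and the gopkg.in/yaml.v2 dependency is assumed — the new key names map onto Go fields like this:

package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

// minimalInitial mirrors a few of the renamed keys under
// "config_client_machine_initial"; it is an illustrative stand-in,
// not the generated dbtesterpb.ConfigClientMachineInitial type.
type minimalInitial struct {
	PathPrefix                     string `yaml:"path_prefix"`
	LogPath                        string `yaml:"log_path"`
	GoogleCloudStorageSubDirectory string `yaml:"google_cloud_storage_sub_directory"`
}

type minimalConfig struct {
	ConfigClientMachineInitial minimalInitial `yaml:"config_client_machine_initial"`
	AllDatabaseIDList          []string       `yaml:"all_database_id_list"`
}

func main() {
	doc := []byte(`
config_client_machine_initial:
  path_prefix: /home/gyuho
  log_path: client-control.log
  google_cloud_storage_sub_directory: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable
all_database_id_list: [etcd__tip, zookeeper__r3_5_2_alpha, consul__v0_7_5]
`)
	var cfg minimalConfig
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.AllDatabaseIDList, cfg.ConfigClientMachineInitial.LogPath)
}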
@@ -1,21 +0,0 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbtester

// Etcdv2 is etcdv2-specific flags
// (https://github.com/coreos/etcd/blob/master/etcdmain/help.go).
type Etcdv2 struct {
	// no options needed yet
}

@@ -1,22 +0,0 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbtester

// Etcdv3 is etcdv3-specific flags
// (https://github.com/coreos/etcd/blob/master/etcdmain/help.go).
type Etcdv3 struct {
	SnapCount      int64 `yaml:"snap_count"`
	QuotaSizeBytes int64 `yaml:"quota_size_bytes"`
}

@@ -1,21 +0,0 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbtester

// Zetcd is zetcd-specific flags
// (https://github.com/coreos/zetcd).
type Zetcd struct {
	// no options needed yet
}

@@ -1,25 +0,0 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbtester

// Zookeeper is Zookeeper-specific flags
// (http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html).
type Zookeeper struct {
	TickTime             int64 `yaml:"tick_time"`
	InitLimit            int64 `yaml:"init_limit"`
	SyncLimit            int64 `yaml:"sync_limit"`
	SnapCount            int64 `yaml:"snap_count"`
	MaxClientConnections int64 `yaml:"max_client_connections"`
}
@@ -55,47 +55,39 @@ func init() {
		break
	}

-	Command.PersistentFlags().StringVar(&databaseID, "database-id", "etcdv3", "etcdv2, etcdv3, zookeeper, consul, zetcd, cetcd.")
+	Command.PersistentFlags().StringVar(&databaseID, "database-id", "etcd__tip", "See dbtesterpb/database_id.pb.go")
	Command.PersistentFlags().StringVarP(&configPath, "config", "c", "", "YAML configuration file path.")
	Command.PersistentFlags().StringVar(&diskDevice, "disk-device", dn, "Disk device to collect disk statistics metrics from.")
	Command.PersistentFlags().StringVar(&networkInterface, "network-interface", nt, "Network interface to record in/outgoing packets.")
}

func commandFunc(cmd *cobra.Command, args []string) error {
-	switch databaseID {
-	case "etcdv2":
-	case "etcdv3":
-	case "zookeeper":
-	case "zetcd":
-	case "consul":
-	case "cetcd":
-	default:
-		return fmt.Errorf("%q is not supported", databaseID)
+	if !dbtesterpb.IsValidDatabaseID(databaseID) {
+		return fmt.Errorf("database id %q is unknown", databaseID)
	}

	cfg, err := dbtester.ReadConfig(configPath, false)
	if err != nil {
		return err
	}
-	gcfg, ok := cfg.DatabaseIDToTestGroup[databaseID]
+	gcfg, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
	if !ok {
		return fmt.Errorf("%q is not found", databaseID)
	}

-	if gcfg.BenchmarkSteps.Step2StressDatabase {
-		switch gcfg.BenchmarkOptions.Type {
+	if gcfg.ConfigClientMachineBenchmarkSteps.Step2StressDatabase {
+		switch gcfg.ConfigClientMachineBenchmarkOptions.Type {
		case "write":
		case "read":
		case "read-oneshot":
		default:
-			return fmt.Errorf("%q is not supported", gcfg.BenchmarkOptions.Type)
+			return fmt.Errorf("%q is not supported", gcfg.ConfigClientMachineBenchmarkOptions.Type)
		}
	}

	pid := int64(os.Getpid())
-	plog.Infof("starting collecting system metrics at %q [disk device: %q | network interface: %q | PID: %d]", cfg.Control.ClientSystemMetricsPath, diskDevice, networkInterface, pid)
+	plog.Infof("starting collecting system metrics at %q [disk device: %q | network interface: %q | PID: %d]", cfg.ConfigClientMachineInitial.ClientSystemMetricsPath, diskDevice, networkInterface, pid)
-	if err = os.RemoveAll(cfg.Control.ClientSystemMetricsPath); err != nil {
+	if err = os.RemoveAll(cfg.ConfigClientMachineInitial.ClientSystemMetricsPath); err != nil {
		return err
	}
	tcfg := &psn.TopConfig{

@@ -105,7 +97,7 @@ func commandFunc(cmd *cobra.Command, args []string) error {
	}
	var metricsCSV *psn.CSV
	metricsCSV, err = psn.NewCSV(
-		cfg.Control.ClientSystemMetricsPath,
+		cfg.ConfigClientMachineInitial.ClientSystemMetricsPath,
		pid,
		diskDevice,
		networkInterface,

@@ -127,7 +119,7 @@ func commandFunc(cmd *cobra.Command, args []string) error {
		}

	case <-donec:
-		plog.Infof("finishing collecting system metrics; saving CSV at %q", cfg.Control.ClientSystemMetricsPath)
+		plog.Infof("finishing collecting system metrics; saving CSV at %q", cfg.ConfigClientMachineInitial.ClientSystemMetricsPath)

		if err := metricsCSV.Save(); err != nil {
			plog.Errorf("psn.CSV.Save(%q) error %v", metricsCSV.FilePath, err)

@@ -139,7 +131,7 @@ func commandFunc(cmd *cobra.Command, args []string) error {
		if err != nil {
			plog.Fatalf("psn.CSV.Interpolate(%q) failed with %v", metricsCSV.FilePath, err)
		}
-		interpolated.FilePath = cfg.Control.ClientSystemMetricsInterpolatedPath
+		interpolated.FilePath = cfg.ConfigClientMachineInitial.ClientSystemMetricsInterpolatedPath
		if err := interpolated.Save(); err != nil {
			plog.Errorf("psn.CSV.Save(%q) error %v", interpolated.FilePath, err)
		} else {

@@ -158,14 +150,14 @@ func commandFunc(cmd *cobra.Command, args []string) error {
		plog.Infof("npt update error: %v", nerr)

	println()
-	if gcfg.BenchmarkSteps.Step1StartDatabase {
+	if gcfg.ConfigClientMachineBenchmarkSteps.Step1StartDatabase {
		plog.Info("step 1: starting databases...")
-		if _, err = cfg.BroadcaseRequest(databaseID, dbtesterpb.Request_Start); err != nil {
+		if _, err = cfg.BroadcaseRequest(databaseID, dbtesterpb.Operation_Start); err != nil {
			return err
		}
	}

-	if gcfg.BenchmarkSteps.Step2StressDatabase {
+	if gcfg.ConfigClientMachineBenchmarkSteps.Step2StressDatabase {
		println()
		time.Sleep(5 * time.Second)
		println()

@@ -175,14 +167,14 @@ func commandFunc(cmd *cobra.Command, args []string) error {
		}
	}

-	if gcfg.BenchmarkSteps.Step3StopDatabase {
+	if gcfg.ConfigClientMachineBenchmarkSteps.Step3StopDatabase {
		println()
		time.Sleep(5 * time.Second)
		println()
		plog.Info("step 3: stopping tests...")
		var idxToResp map[int]dbtesterpb.Response
		for i := 0; i < 5; i++ {
-			idxToResp, err = cfg.BroadcaseRequest(databaseID, dbtesterpb.Request_Stop)
+			idxToResp, err = cfg.BroadcaseRequest(databaseID, dbtesterpb.Operation_Stop)
			if err != nil {
				plog.Warningf("#%d: STOP failed at %v", i, err)
				time.Sleep(300 * time.Millisecond)

@@ -206,36 +198,36 @@ func commandFunc(cmd *cobra.Command, args []string) error {
	close(donec)
	<-sysdonec

-	if gcfg.BenchmarkSteps.Step4UploadLogs {
+	if gcfg.ConfigClientMachineBenchmarkSteps.Step4UploadLogs {
		println()
		time.Sleep(3 * time.Second)
		println()
		plog.Info("step 4: uploading logs...")
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.LogPath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.LogPath); err != nil {
			return err
		}
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.ClientSystemMetricsPath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.ClientSystemMetricsPath); err != nil {
			return err
		}
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.ClientSystemMetricsInterpolatedPath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.ClientSystemMetricsInterpolatedPath); err != nil {
			return err
		}
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.ClientLatencyThroughputTimeseriesPath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.ClientLatencyThroughputTimeseriesPath); err != nil {
			return err
		}
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.ClientLatencyDistributionAllPath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.ClientLatencyDistributionAllPath); err != nil {
			return err
		}
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.ClientLatencyDistributionPercentilePath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.ClientLatencyDistributionPercentilePath); err != nil {
			return err
		}
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.ClientLatencyDistributionSummaryPath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.ClientLatencyDistributionSummaryPath); err != nil {
			return err
		}
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.ClientLatencyByKeyNumberPath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.ClientLatencyByKeyNumberPath); err != nil {
			return err
		}
-		if err = cfg.UploadToGoogle(databaseID, cfg.Control.ServerDiskSpaceUsageSummaryPath); err != nil {
+		if err = cfg.UploadToGoogle(databaseID, cfg.ConfigClientMachineInitial.ServerDiskSpaceUsageSummaryPath); err != nil {
			return err
		}
	}
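The stop path above retries cfg.BroadcaseRequest up to five times with a 300 ms pause between attempts before giving up. A self-contained sketch of that bounded-retry shape; broadcastStop below is a placeholder, not dbtester's API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// broadcastStop is a stand-in for the real broadcast call; it exists
// only to make the retry loop runnable and succeeds on the third try.
func broadcastStop(attempt int) error {
	if attempt < 2 {
		return errors.New("agent not ready")
	}
	return nil
}

func main() {
	var err error
	for i := 0; i < 5; i++ {
		if err = broadcastStop(i); err == nil {
			break
		}
		fmt.Printf("#%d: STOP failed at %v\n", i, err)
		time.Sleep(300 * time.Millisecond)
	}
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("stopped")
}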
File diff suppressed because it is too large.
@@ -0,0 +1,57 @@
syntax = "proto3";
package dbtesterpb;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

// ConfigAnalyzeMachineInitial represents common control options and test data information for analyzer machine.
message ConfigAnalyzeMachineInitial {
  string DatabaseID = 1;
  string DatabaseTag = 2;
  string DatabaseDescription = 3;

  string PathPrefix = 4 [(gogoproto.moretags) = "yaml:\"path_prefix\""];
  string ClientSystemMetricsInterpolatedPath = 5 [(gogoproto.moretags) = "yaml:\"client_system_metrics_interpolated_path\""];
  string ClientLatencyThroughputTimeseriesPath = 6 [(gogoproto.moretags) = "yaml:\"client_latency_throughput_timeseries_path\""];
  string ClientLatencyDistributionAllPath = 7 [(gogoproto.moretags) = "yaml:\"client_latency_distribution_all_path\""];
  string ClientLatencyDistributionPercentilePath = 8 [(gogoproto.moretags) = "yaml:\"client_latency_distribution_percentile_path\""];
  string ClientLatencyDistributionSummaryPath = 9 [(gogoproto.moretags) = "yaml:\"client_latency_distribution_summary_path\""];
  string ClientLatencyByKeyNumberPath = 10 [(gogoproto.moretags) = "yaml:\"client_latency_by_key_number_path\""];
  string ServerDiskSpaceUsageSummaryPath = 11 [(gogoproto.moretags) = "yaml:\"server_disk_space_usage_summary_path\""];
  string ServerMemoryByKeyNumberPath = 12 [(gogoproto.moretags) = "yaml:\"server_memory_by_key_number_path\""];
  string ServerReadBytesDeltaByKeyNumberPath = 13 [(gogoproto.moretags) = "yaml:\"server_read_bytes_delta_by_key_number_path\""];
  string ServerWriteBytesDeltaByKeyNumberPath = 14 [(gogoproto.moretags) = "yaml:\"server_write_bytes_delta_by_key_number_path\""];
  repeated string ServerSystemMetricsInterpolatedPathList = 15 [(gogoproto.moretags) = "yaml:\"server_system_metrics_interpolated_path_list\""];
  string AllAggregatedOutputPath = 16 [(gogoproto.moretags) = "yaml:\"all_aggregated_output_path\""];
}

message ConfigAnalyzeMachineAllAggregatedOutput {
  string AllAggregatedOutputPathCSV = 1 [(gogoproto.moretags) = "yaml:\"all_aggregated_output_path_csv\""];
  string AllAggregatedOutputPathTXT = 2 [(gogoproto.moretags) = "yaml:\"all_aggregated_output_path_txt\""];
}

// ConfigAnalyzeMachinePlot defines plot configuration.
message ConfigAnalyzeMachinePlot {
  string Column = 1 [(gogoproto.moretags) = "yaml:\"column\""];
  string XAxis = 2 [(gogoproto.moretags) = "yaml:\"x_axis\""];
  string YAxis = 3 [(gogoproto.moretags) = "yaml:\"y_axis\""];
  string OutputPathCSV = 4 [(gogoproto.moretags) = "yaml:\"output_path_csv\""];
  repeated string OutputPathList = 5 [(gogoproto.moretags) = "yaml:\"output_path_list\""];
}

// ConfigAnalyzeMachineImage defines image configuration.
message ConfigAnalyzeMachineImage {
  string Title = 1 [(gogoproto.moretags) = "yaml:\"title\""];
  string Path = 2 [(gogoproto.moretags) = "yaml:\"path\""];
  string Type = 3 [(gogoproto.moretags) = "yaml:\"type\""];
}

// ConfigAnalyzeMachineREADME defines read configuration.
message ConfigAnalyzeMachineREADME {
  string OutputPath = 1 [(gogoproto.moretags) = "yaml:\"output_path\""];
  repeated ConfigAnalyzeMachineImage Images = 2 [(gogoproto.moretags) = "yaml:\"images\""];
}
File diff suppressed because it is too large.
@@ -0,0 +1,92 @@
syntax = "proto3";
package dbtesterpb;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

import "dbtesterpb/flag_etcd.proto";
import "dbtesterpb/flag_zookeeper.proto";
import "dbtesterpb/flag_consul.proto";
import "dbtesterpb/flag_zetcd.proto";
import "dbtesterpb/flag_cetcd.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

// ConfigClientMachineInitial represents common control options on client machine.
message ConfigClientMachineInitial {
  string PathPrefix = 1 [(gogoproto.moretags) = "yaml:\"path_prefix\""];
  string LogPath = 2 [(gogoproto.moretags) = "yaml:\"log_path\""];
  string ClientSystemMetricsPath = 3 [(gogoproto.moretags) = "yaml:\"client_system_metrics_path\""];
  string ClientSystemMetricsInterpolatedPath = 4 [(gogoproto.moretags) = "yaml:\"client_system_metrics_interpolated_path\""];
  string ClientLatencyThroughputTimeseriesPath = 5 [(gogoproto.moretags) = "yaml:\"client_latency_throughput_timeseries_path\""];
  string ClientLatencyDistributionAllPath = 6 [(gogoproto.moretags) = "yaml:\"client_latency_distribution_all_path\""];
  string ClientLatencyDistributionPercentilePath = 7 [(gogoproto.moretags) = "yaml:\"client_latency_distribution_percentile_path\""];
  string ClientLatencyDistributionSummaryPath = 8 [(gogoproto.moretags) = "yaml:\"client_latency_distribution_summary_path\""];
  string ClientLatencyByKeyNumberPath = 9 [(gogoproto.moretags) = "yaml:\"client_latency_by_key_number_path\""];
  string ServerDiskSpaceUsageSummaryPath = 10 [(gogoproto.moretags) = "yaml:\"server_disk_space_usage_summary_path\""];

  string GoogleCloudProjectName = 100 [(gogoproto.moretags) = "yaml:\"google_cloud_project_name\""];
  string GoogleCloudStorageKeyPath = 101 [(gogoproto.moretags) = "yaml:\"google_cloud_storage_key_path\""];
  string GoogleCloudStorageKey = 102;
  string GoogleCloudStorageBucketName = 103 [(gogoproto.moretags) = "yaml:\"google_cloud_storage_bucket_name\""];
  string GoogleCloudStorageSubDirectory = 104 [(gogoproto.moretags) = "yaml:\"google_cloud_storage_sub_directory\""];
}

// ConfigClientMachineBenchmarkOptions represents benchmark options.
message ConfigClientMachineBenchmarkOptions {
  string Type = 1 [(gogoproto.moretags) = "yaml:\"type\""];
  int64 RequestNumber = 2 [(gogoproto.moretags) = "yaml:\"request_number\""];
  int64 ConnectionNumber = 3 [(gogoproto.moretags) = "yaml:\"connection_number\""];
  int64 ClientNumber = 4 [(gogoproto.moretags) = "yaml:\"client_number\""];
  repeated int64 ConnectionClientNumbers = 5 [(gogoproto.moretags) = "yaml:\"connection_client_numbers\""];
  int64 RateLimitRequestsPerSecond = 6 [(gogoproto.moretags) = "yaml:\"rate_limit_requests_per_second\""];

  bool SameKey = 7 [(gogoproto.moretags) = "yaml:\"same_key\""];
  int64 KeySizeBytes = 8 [(gogoproto.moretags) = "yaml:\"key_size_bytes\""];
  int64 ValueSizeBytes = 9 [(gogoproto.moretags) = "yaml:\"value_size_bytes\""];

  bool StaleRead = 10 [(gogoproto.moretags) = "yaml:\"stale_read\""];
}

// ConfigClientMachineBenchmarkSteps represents benchmark steps.
message ConfigClientMachineBenchmarkSteps {
  bool Step1StartDatabase = 1 [(gogoproto.moretags) = "yaml:\"step1_start_database\""];
  bool Step2StressDatabase = 2 [(gogoproto.moretags) = "yaml:\"step2_stress_database\""];
  bool Step3StopDatabase = 3 [(gogoproto.moretags) = "yaml:\"step3_stop_database\""];
  bool Step4UploadLogs = 4 [(gogoproto.moretags) = "yaml:\"step4_upload_logs\""];
}

// ConfigClientMachineAgentControl represents control options on client machine.
message ConfigClientMachineAgentControl {
  string DatabaseID = 1 [(gogoproto.moretags) = "yaml:\"database_id\""];
  string DatabaseDescription = 2 [(gogoproto.moretags) = "yaml:\"database_description\""];
  string DatabaseTag = 3 [(gogoproto.moretags) = "yaml:\"database_tag\""];

  repeated string PeerIPs = 4 [(gogoproto.moretags) = "yaml:\"peer_ips\""];
  string PeerIPsString = 5 [(gogoproto.moretags) = "yaml:\"peer_ips_string\""];

  int64 AgentPortToConnect = 6 [(gogoproto.moretags) = "yaml:\"agent_port_to_connect\""];
  repeated string AgentEndpoints = 7 [(gogoproto.moretags) = "yaml:\"agent_endpoints\""];

  int64 DatabasePortToConnect = 8 [(gogoproto.moretags) = "yaml:\"database_port_to_connect\""];
  repeated string DatabaseEndpoints = 9 [(gogoproto.moretags) = "yaml:\"database_endpoints\""];

  flag__etcd__v2_3 flag__etcd__v2_3 = 100 [(gogoproto.moretags) = "yaml:\"etcd__v2_3\""];
  flag__etcd__v3_1 flag__etcd__v3_1 = 101 [(gogoproto.moretags) = "yaml:\"etcd__v3_1\""];
  flag__etcd__v3_2 flag__etcd__v3_2 = 102 [(gogoproto.moretags) = "yaml:\"etcd__v3_2\""];
  flag__etcd__tip flag__etcd__tip = 103 [(gogoproto.moretags) = "yaml:\"etcd__tip\""];

  flag__zookeeper__r3_4_9 flag__zookeeper__r3_4_9 = 200 [(gogoproto.moretags) = "yaml:\"zookeeper__r3_4_9\""];
  flag__zookeeper__r3_5_2_alpha flag__zookeeper__r3_5_2_alpha = 201 [(gogoproto.moretags) = "yaml:\"zookeeper__r3_5_2_alpha\""];

  flag__consul__v0_7_5 flag__consul__v0_7_5 = 300 [(gogoproto.moretags) = "yaml:\"consul__v0_7_5\""];
  flag__consul__v0_8_0 flag__consul__v0_8_0 = 301 [(gogoproto.moretags) = "yaml:\"consul__v0_8_0\""];

  flag__cetcd__beta flag__cetcd__beta = 400 [(gogoproto.moretags) = "yaml:\"cetcd__beta\""];
  flag__zetcd__beta flag__zetcd__beta = 500 [(gogoproto.moretags) = "yaml:\"zetcd__beta\""];

  ConfigClientMachineBenchmarkOptions ConfigClientMachineBenchmarkOptions = 1000 [(gogoproto.moretags) = "yaml:\"benchmark_options\""];
  ConfigClientMachineBenchmarkSteps ConfigClientMachineBenchmarkSteps = 1001 [(gogoproto.moretags) = "yaml:\"benchmark_steps\""];
}
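ConfigClientMachineAgentControl carries both the raw peer_ips list and the derived agent_endpoints / database_endpoints lists together with the *_port_to_connect fields; presumably the endpoint lists are just host:port joins of those values. A minimal sketch of that derivation, not dbtester's actual code:

package main

import "fmt"

// joinEndpoints pairs every peer IP with a single port, mirroring how
// agent_endpoints / database_endpoints relate to peer_ips and the
// *_port_to_connect fields; illustrative only.
func joinEndpoints(peerIPs []string, port int64) []string {
	eps := make([]string, 0, len(peerIPs))
	for _, ip := range peerIPs {
		eps = append(eps, fmt.Sprintf("%s:%d", ip, port))
	}
	return eps
}

func main() {
	peerIPs := []string{"10.240.0.7", "10.240.0.8", "10.240.0.12"}
	fmt.Println(joinEndpoints(peerIPs, 3500)) // agent endpoints
	fmt.Println(joinEndpoints(peerIPs, 2379)) // database endpoints (etcd)
}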
@@ -0,0 +1,89 @@
// Code generated by protoc-gen-gogo.
// source: dbtesterpb/database_id.proto
// DO NOT EDIT!

package dbtesterpb

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// DatabaseID differentiates between major or minor releases (possibly different APIs)
// of each database. Make sure to make accordingn changes in 'flag_*' whenever an ID
// is added/removed.
type DatabaseID int32

const (
	DatabaseID_etcd__v2_3              DatabaseID = 0
	DatabaseID_etcd__v3_1              DatabaseID = 1
	DatabaseID_etcd__v3_2              DatabaseID = 2
	DatabaseID_etcd__tip               DatabaseID = 3
	DatabaseID_zookeeper__r3_4_9       DatabaseID = 10
	DatabaseID_zookeeper__r3_5_2_alpha DatabaseID = 11
	DatabaseID_consul__v0_7_5          DatabaseID = 20
	DatabaseID_consul__v0_8_0          DatabaseID = 21
	DatabaseID_zetcd__beta             DatabaseID = 30
	DatabaseID_cetcd__beta             DatabaseID = 40
)

var DatabaseID_name = map[int32]string{
	0:  "etcd__v2_3",
	1:  "etcd__v3_1",
	2:  "etcd__v3_2",
	3:  "etcd__tip",
	10: "zookeeper__r3_4_9",
	11: "zookeeper__r3_5_2_alpha",
	20: "consul__v0_7_5",
	21: "consul__v0_8_0",
	30: "zetcd__beta",
	40: "cetcd__beta",
}
var DatabaseID_value = map[string]int32{
	"etcd__v2_3":              0,
	"etcd__v3_1":              1,
	"etcd__v3_2":              2,
	"etcd__tip":               3,
	"zookeeper__r3_4_9":       10,
	"zookeeper__r3_5_2_alpha": 11,
	"consul__v0_7_5":          20,
	"consul__v0_8_0":          21,
	"zetcd__beta":             30,
	"cetcd__beta":             40,
}

func (x DatabaseID) String() string {
	return proto.EnumName(DatabaseID_name, int32(x))
}
func (DatabaseID) EnumDescriptor() ([]byte, []int) { return fileDescriptorDatabaseId, []int{0} }

func init() {
	proto.RegisterEnum("dbtesterpb.DatabaseID", DatabaseID_name, DatabaseID_value)
}

func init() { proto.RegisterFile("dbtesterpb/database_id.proto", fileDescriptorDatabaseId) }

var fileDescriptorDatabaseId = []byte{
	// 245 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x8f, 0xcf, 0x4a, 0xc3, 0x40,
	0x10, 0x87, 0xb3, 0x0a, 0x82, 0x53, 0xac, 0xe3, 0xd2, 0x22, 0xa8, 0xec, 0x59, 0x04, 0x9b, 0x9a,
	0x58, 0xd4, 0xab, 0xf4, 0xe2, 0x53, 0x0c, 0xbb, 0xc9, 0x9a, 0x06, 0xab, 0xbb, 0x24, 0x93, 0x1e,
	0xfa, 0x24, 0x3e, 0x52, 0xbd, 0xf9, 0x08, 0x1a, 0x5f, 0x44, 0xdc, 0x08, 0x36, 0xde, 0xe6, 0xfb,
	0xe6, 0x1f, 0x3f, 0x38, 0xcb, 0x0d, 0xdb, 0x9a, 0x6d, 0xe5, 0x4d, 0x9c, 0x6b, 0xd6, 0x46, 0xd7,
	0x96, 0xca, 0x7c, 0xe2, 0x2b, 0xc7, 0x4e, 0xc2, 0x5f, 0xf7, 0xe4, 0xb2, 0x28, 0x79, 0xd1, 0x98,
	0x49, 0xe6, 0x9e, 0xe3, 0xc2, 0x15, 0x2e, 0x0e, 0x23, 0xa6, 0x79, 0x0c, 0x14, 0x20, 0x54, 0xdd,
	0xea, 0xc5, 0x9b, 0x00, 0x98, 0xff, 0x1e, 0x7c, 0x98, 0xcb, 0x21, 0x80, 0xe5, 0x2c, 0x27, 0x5a,
	0x25, 0x94, 0x62, 0xb4, 0xc5, 0x29, 0x5d, 0xa1, 0xe8, 0x71, 0x82, 0x3b, 0xf2, 0x00, 0xf6, 0x3b,
	0xe6, 0xd2, 0xe3, 0xae, 0x1c, 0xc3, 0xd1, 0xda, 0xb9, 0x27, 0x6b, 0xbd, 0xad, 0x88, 0xaa, 0x94,
	0xae, 0xe9, 0x0e, 0x41, 0x9e, 0xc2, 0x71, 0x5f, 0xcf, 0x28, 0x21, 0xbd, 0xf4, 0x0b, 0x8d, 0x03,
	0x29, 0x61, 0x98, 0xb9, 0x97, 0xba, 0x59, 0x12, 0xad, 0xa6, 0x74, 0x43, 0x33, 0x1c, 0xfd, 0x73,
	0xb7, 0x34, 0xc5, 0xb1, 0x3c, 0x84, 0xc1, 0xba, 0xfb, 0x65, 0x2c, 0x6b, 0x54, 0x3f, 0x22, 0xdb,
	0x12, 0xe7, 0xf7, 0xa3, 0xcd, 0xa7, 0x8a, 0x36, 0xad, 0x12, 0xef, 0xad, 0x12, 0x1f, 0xad, 0x12,
	0xaf, 0x5f, 0x2a, 0x32, 0x7b, 0x21, 0x68, 0xfa, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x89, 0x6f, 0x4c,
	0x96, 0x43, 0x01, 0x00, 0x00,
}
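The command code earlier calls dbtesterpb.IsValidDatabaseID; that helper is not part of this hunk, but the generated DatabaseID_value map above is enough to build such a lookup. A minimal sketch, assuming only the generated map (copied here into package main to stay self-contained); the real helper lives in the dbtesterpb package and may differ in detail:

package main

import "fmt"

// DatabaseID_value mirrors the generated map shown above.
var DatabaseID_value = map[string]int32{
	"etcd__v2_3": 0, "etcd__v3_1": 1, "etcd__v3_2": 2, "etcd__tip": 3,
	"zookeeper__r3_4_9": 10, "zookeeper__r3_5_2_alpha": 11,
	"consul__v0_7_5": 20, "consul__v0_8_0": 21,
	"zetcd__beta": 30, "cetcd__beta": 40,
}

// isValidDatabaseID reports whether s names a known DatabaseID.
func isValidDatabaseID(s string) bool {
	_, ok := DatabaseID_value[s]
	return ok
}

func main() {
	fmt.Println(isValidDatabaseID("etcd__tip")) // true
	fmt.Println(isValidDatabaseID("etcdv3"))    // false: old-style ID
}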
@@ -0,0 +1,28 @@
syntax = "proto3";
package dbtesterpb;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

// DatabaseID differentiates between major or minor releases (possibly different APIs)
// of each database. Make sure to make accordingn changes in 'flag_*' whenever an ID
// is added/removed.
enum DatabaseID {
  etcd__v2_3 = 0;
  etcd__v3_1 = 1;
  etcd__v3_2 = 2;
  etcd__tip = 3;

  zookeeper__r3_4_9 = 10;
  zookeeper__r3_5_2_alpha = 11;

  consul__v0_7_5 = 20;
  consul__v0_8_0 = 21;

  zetcd__beta = 30;
  cetcd__beta = 40;
}
@@ -0,0 +1,264 @@
// Code generated by protoc-gen-gogo.
// source: dbtesterpb/flag_cetcd.proto
// DO NOT EDIT!

package dbtesterpb

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"

import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// Flag_Cetcd_Beta is cetcd-specific flags
// (https://github.com/coreos/cetcd).
type Flag_Cetcd_Beta struct {
}

func (m *Flag_Cetcd_Beta) Reset()                    { *m = Flag_Cetcd_Beta{} }
func (m *Flag_Cetcd_Beta) String() string            { return proto.CompactTextString(m) }
func (*Flag_Cetcd_Beta) ProtoMessage()               {}
func (*Flag_Cetcd_Beta) Descriptor() ([]byte, []int) { return fileDescriptorFlagCetcd, []int{0} }

func init() {
	proto.RegisterType((*Flag_Cetcd_Beta)(nil), "dbtesterpb.flag__cetcd__beta")
}
func (m *Flag_Cetcd_Beta) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Flag_Cetcd_Beta) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	return i, nil
}

func encodeFixed64FlagCetcd(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32FlagCetcd(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintFlagCetcd(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}
func (m *Flag_Cetcd_Beta) Size() (n int) {
	var l int
	_ = l
	return n
}

func sovFlagCetcd(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}
func sozFlagCetcd(x uint64) (n int) {
	return sovFlagCetcd(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Flag_Cetcd_Beta) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowFlagCetcd
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: flag__cetcd__beta: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: flag__cetcd__beta: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		default:
			iNdEx = preIndex
			skippy, err := skipFlagCetcd(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthFlagCetcd
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipFlagCetcd(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowFlagCetcd
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowFlagCetcd
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			iNdEx += 8
			return iNdEx, nil
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowFlagCetcd
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			if length < 0 {
				return 0, ErrInvalidLengthFlagCetcd
			}
			return iNdEx, nil
		case 3:
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowFlagCetcd
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipFlagCetcd(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			return iNdEx, nil
		case 5:
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}

var (
	ErrInvalidLengthFlagCetcd = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowFlagCetcd   = fmt.Errorf("proto: integer overflow")
)

func init() { proto.RegisterFile("dbtesterpb/flag_cetcd.proto", fileDescriptorFlagCetcd) }

var fileDescriptorFlagCetcd = []byte{
	// 130 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x49, 0x2a, 0x49,
	0x2d, 0x2e, 0x49, 0x2d, 0x2a, 0x48, 0xd2, 0x4f, 0xcb, 0x49, 0x4c, 0x8f, 0x4f, 0x4e, 0x2d, 0x49,
	0x4e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x48, 0x4a, 0xe9, 0xa6, 0x67, 0x96,
	0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0x95, 0x24,
	0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xaa, 0x24, 0xcc, 0x25, 0x08, 0x36, 0x0e,
	0x62, 0x5e, 0x7c, 0x7c, 0x52, 0x6a, 0x49, 0xa2, 0x93, 0xc8, 0x89, 0x87, 0x72, 0x0c, 0x27, 0x1e,
	0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49,
	0x6c, 0x60, 0x1d, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x7e, 0x95, 0x6e, 0x8b, 0x00,
	0x00, 0x00,
}
@ -0,0 +1,14 @@
syntax = "proto3";
package dbtesterpb;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

// Flag_Cetcd_Beta is cetcd-specific flags
// (https://github.com/coreos/cetcd).
message flag__cetcd__beta {
}
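A minimal sketch, not part of this commit, of how the generated cetcd flag code above is exercised. The Go type name Flag_Cetcd_Beta is assumed to follow the naming of the other generated flag types in this diff; since the message has no fields, the generated Marshal produces an empty payload and Unmarshal accepts it.

package main

import (
    "fmt"

    "github.com/coreos/dbtester/dbtesterpb"
)

func main() {
    msg := &dbtesterpb.Flag_Cetcd_Beta{}

    // The generated MarshalTo writes no fields, so the payload is empty.
    b, err := msg.Marshal()
    if err != nil {
        panic(err)
    }
    fmt.Println("encoded length:", len(b)) // 0

    // The generated Unmarshal walks the (empty) buffer and returns nil.
    var out dbtesterpb.Flag_Cetcd_Beta
    if err := out.Unmarshal(b); err != nil {
        panic(err)
    }
}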
@ -0,0 +1,347 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: dbtesterpb/flag_consul.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package dbtesterpb
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import _ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// See https://github.com/hashicorp/consul for more.
|
||||||
|
type Flag_Consul_V0_7_5 struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Consul_V0_7_5) Reset() { *m = Flag_Consul_V0_7_5{} }
|
||||||
|
func (m *Flag_Consul_V0_7_5) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Flag_Consul_V0_7_5) ProtoMessage() {}
|
||||||
|
func (*Flag_Consul_V0_7_5) Descriptor() ([]byte, []int) { return fileDescriptorFlagConsul, []int{0} }
|
||||||
|
|
||||||
|
// See https://github.com/hashicorp/consul for more.
|
||||||
|
type Flag_Consul_V0_8_0 struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Consul_V0_8_0) Reset() { *m = Flag_Consul_V0_8_0{} }
|
||||||
|
func (m *Flag_Consul_V0_8_0) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Flag_Consul_V0_8_0) ProtoMessage() {}
|
||||||
|
func (*Flag_Consul_V0_8_0) Descriptor() ([]byte, []int) { return fileDescriptorFlagConsul, []int{1} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Flag_Consul_V0_7_5)(nil), "dbtesterpb.flag__consul__v0_7_5")
|
||||||
|
proto.RegisterType((*Flag_Consul_V0_8_0)(nil), "dbtesterpb.flag__consul__v0_8_0")
|
||||||
|
}
|
||||||
|
func (m *Flag_Consul_V0_7_5) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Consul_V0_7_5) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Consul_V0_8_0) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Consul_V0_8_0) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64FlagConsul(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32FlagConsul(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintFlagConsul(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *Flag_Consul_V0_7_5) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Consul_V0_8_0) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovFlagConsul(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozFlagConsul(x uint64) (n int) {
|
||||||
|
return sovFlagConsul(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *Flag_Consul_V0_7_5) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagConsul
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: flag__consul__v0_7_5: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: flag__consul__v0_7_5: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFlagConsul(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFlagConsul
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Flag_Consul_V0_8_0) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagConsul
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: flag__consul__v0_8_0: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: flag__consul__v0_8_0: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFlagConsul(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFlagConsul
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipFlagConsul(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagConsul
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagConsul
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagConsul
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthFlagConsul
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagConsul
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipFlagConsul(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthFlagConsul = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowFlagConsul = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("dbtesterpb/flag_consul.proto", fileDescriptorFlagConsul) }
|
||||||
|
|
||||||
|
var fileDescriptorFlagConsul = []byte{
|
||||||
|
// 138 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x49, 0x2a, 0x49,
|
||||||
|
0x2d, 0x2e, 0x49, 0x2d, 0x2a, 0x48, 0xd2, 0x4f, 0xcb, 0x49, 0x4c, 0x8f, 0x4f, 0xce, 0xcf, 0x2b,
|
||||||
|
0x2e, 0xcd, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0xc8, 0x4a, 0xe9, 0xa6, 0x67,
|
||||||
|
0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0x95,
|
||||||
|
0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xaa, 0x24, 0xc6, 0x25, 0x02, 0x36,
|
||||||
|
0x0f, 0x6a, 0x60, 0x7c, 0x7c, 0x99, 0x41, 0xbc, 0x79, 0xbc, 0x29, 0x56, 0x71, 0x8b, 0x78, 0x03,
|
||||||
|
0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1,
|
||||||
|
0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00, 0x00,
|
||||||
|
0xff, 0xff, 0x93, 0xd4, 0x88, 0x26, 0xa7, 0x00, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,17 @@
syntax = "proto3";
package dbtesterpb;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

// See https://github.com/hashicorp/consul for more.
message flag__consul__v0_7_5 {
}

// See https://github.com/hashicorp/consul for more.
message flag__consul__v0_8_0 {
}
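The generated Unmarshal and skip functions above (skipFlagCetcd, skipFlagConsul, and the others that follow) all decode protobuf wire tags the same way: read one varint, then split it into a field number in the upper bits and a wire type in the low three bits. A small standalone sketch of that step, not taken from this repository:

package main

import "fmt"

// decodeTag reads one varint tag from b and splits it into field number and
// wire type, mirroring the fieldNum := int32(wire >> 3) / wireType := int(wire & 0x7)
// pattern in the generated code above. n is the number of bytes consumed.
func decodeTag(b []byte) (fieldNum int32, wireType int, n int, err error) {
    var wire uint64
    for shift := uint(0); ; shift += 7 {
        if n >= len(b) {
            return 0, 0, 0, fmt.Errorf("unexpected EOF")
        }
        c := b[n]
        n++
        wire |= (uint64(c) & 0x7F) << shift
        if c < 0x80 {
            break
        }
    }
    return int32(wire >> 3), int(wire & 0x7), n, nil
}

func main() {
    // 0x10 encodes field number 2 with wire type 0 (varint).
    f, w, _, _ := decodeTag([]byte{0x10})
    fmt.Println(f, w) // 2 0
}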
@ -0,0 +1,718 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: dbtesterpb/flag_etcd.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package dbtesterpb
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import _ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// See https://github.com/coreos/etcd/blob/master/etcdmain/help.go for more.
|
||||||
|
type Flag_Etcd_V2_3 struct {
|
||||||
|
SnapshotCount int64 `protobuf:"varint,1,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot_count"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V2_3) Reset() { *m = Flag_Etcd_V2_3{} }
|
||||||
|
func (m *Flag_Etcd_V2_3) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Flag_Etcd_V2_3) ProtoMessage() {}
|
||||||
|
func (*Flag_Etcd_V2_3) Descriptor() ([]byte, []int) { return fileDescriptorFlagEtcd, []int{0} }
|
||||||
|
|
||||||
|
// See https://github.com/coreos/etcd/blob/master/etcdmain/help.go for more.
|
||||||
|
type Flag_Etcd_V3_1 struct {
|
||||||
|
SnapshotCount int64 `protobuf:"varint,1,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot_count"`
|
||||||
|
QuotaSizeBytes int64 `protobuf:"varint,2,opt,name=QuotaSizeBytes,proto3" json:"QuotaSizeBytes,omitempty" yaml:"quota_size_bytes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V3_1) Reset() { *m = Flag_Etcd_V3_1{} }
|
||||||
|
func (m *Flag_Etcd_V3_1) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Flag_Etcd_V3_1) ProtoMessage() {}
|
||||||
|
func (*Flag_Etcd_V3_1) Descriptor() ([]byte, []int) { return fileDescriptorFlagEtcd, []int{1} }
|
||||||
|
|
||||||
|
// See https://github.com/coreos/etcd/blob/master/etcdmain/help.go for more.
|
||||||
|
type Flag_Etcd_V3_2 struct {
|
||||||
|
SnapshotCount int64 `protobuf:"varint,1,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot_count"`
|
||||||
|
QuotaSizeBytes int64 `protobuf:"varint,2,opt,name=QuotaSizeBytes,proto3" json:"QuotaSizeBytes,omitempty" yaml:"quota_size_bytes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V3_2) Reset() { *m = Flag_Etcd_V3_2{} }
|
||||||
|
func (m *Flag_Etcd_V3_2) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Flag_Etcd_V3_2) ProtoMessage() {}
|
||||||
|
func (*Flag_Etcd_V3_2) Descriptor() ([]byte, []int) { return fileDescriptorFlagEtcd, []int{2} }
|
||||||
|
|
||||||
|
// See https://github.com/coreos/etcd/blob/master/etcdmain/help.go for more.
|
||||||
|
type Flag_Etcd_Tip struct {
|
||||||
|
SnapshotCount int64 `protobuf:"varint,1,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot_count"`
|
||||||
|
QuotaSizeBytes int64 `protobuf:"varint,2,opt,name=QuotaSizeBytes,proto3" json:"QuotaSizeBytes,omitempty" yaml:"quota_size_bytes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_Tip) Reset() { *m = Flag_Etcd_Tip{} }
|
||||||
|
func (m *Flag_Etcd_Tip) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Flag_Etcd_Tip) ProtoMessage() {}
|
||||||
|
func (*Flag_Etcd_Tip) Descriptor() ([]byte, []int) { return fileDescriptorFlagEtcd, []int{3} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Flag_Etcd_V2_3)(nil), "dbtesterpb.flag__etcd__v2_3")
|
||||||
|
proto.RegisterType((*Flag_Etcd_V3_1)(nil), "dbtesterpb.flag__etcd__v3_1")
|
||||||
|
proto.RegisterType((*Flag_Etcd_V3_2)(nil), "dbtesterpb.flag__etcd__v3_2")
|
||||||
|
proto.RegisterType((*Flag_Etcd_Tip)(nil), "dbtesterpb.flag__etcd__tip")
|
||||||
|
}
|
||||||
|
func (m *Flag_Etcd_V2_3) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V2_3) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.SnapshotCount != 0 {
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
i++
|
||||||
|
i = encodeVarintFlagEtcd(dAtA, i, uint64(m.SnapshotCount))
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V3_1) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V3_1) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.SnapshotCount != 0 {
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
i++
|
||||||
|
i = encodeVarintFlagEtcd(dAtA, i, uint64(m.SnapshotCount))
|
||||||
|
}
|
||||||
|
if m.QuotaSizeBytes != 0 {
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
i++
|
||||||
|
i = encodeVarintFlagEtcd(dAtA, i, uint64(m.QuotaSizeBytes))
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V3_2) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V3_2) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.SnapshotCount != 0 {
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
i++
|
||||||
|
i = encodeVarintFlagEtcd(dAtA, i, uint64(m.SnapshotCount))
|
||||||
|
}
|
||||||
|
if m.QuotaSizeBytes != 0 {
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
i++
|
||||||
|
i = encodeVarintFlagEtcd(dAtA, i, uint64(m.QuotaSizeBytes))
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_Tip) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_Tip) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.SnapshotCount != 0 {
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
i++
|
||||||
|
i = encodeVarintFlagEtcd(dAtA, i, uint64(m.SnapshotCount))
|
||||||
|
}
|
||||||
|
if m.QuotaSizeBytes != 0 {
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
i++
|
||||||
|
i = encodeVarintFlagEtcd(dAtA, i, uint64(m.QuotaSizeBytes))
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64FlagEtcd(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32FlagEtcd(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintFlagEtcd(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *Flag_Etcd_V2_3) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.SnapshotCount != 0 {
|
||||||
|
n += 1 + sovFlagEtcd(uint64(m.SnapshotCount))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V3_1) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.SnapshotCount != 0 {
|
||||||
|
n += 1 + sovFlagEtcd(uint64(m.SnapshotCount))
|
||||||
|
}
|
||||||
|
if m.QuotaSizeBytes != 0 {
|
||||||
|
n += 1 + sovFlagEtcd(uint64(m.QuotaSizeBytes))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_V3_2) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.SnapshotCount != 0 {
|
||||||
|
n += 1 + sovFlagEtcd(uint64(m.SnapshotCount))
|
||||||
|
}
|
||||||
|
if m.QuotaSizeBytes != 0 {
|
||||||
|
n += 1 + sovFlagEtcd(uint64(m.QuotaSizeBytes))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Etcd_Tip) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.SnapshotCount != 0 {
|
||||||
|
n += 1 + sovFlagEtcd(uint64(m.SnapshotCount))
|
||||||
|
}
|
||||||
|
if m.QuotaSizeBytes != 0 {
|
||||||
|
n += 1 + sovFlagEtcd(uint64(m.QuotaSizeBytes))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovFlagEtcd(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozFlagEtcd(x uint64) (n int) {
|
||||||
|
return sovFlagEtcd(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *Flag_Etcd_V2_3) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: flag__etcd__v2_3: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: flag__etcd__v2_3: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field SnapshotCount", wireType)
|
||||||
|
}
|
||||||
|
m.SnapshotCount = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.SnapshotCount |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFlagEtcd(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFlagEtcd
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Flag_Etcd_V3_1) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: flag__etcd__v3_1: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: flag__etcd__v3_1: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field SnapshotCount", wireType)
|
||||||
|
}
|
||||||
|
m.SnapshotCount = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.SnapshotCount |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field QuotaSizeBytes", wireType)
|
||||||
|
}
|
||||||
|
m.QuotaSizeBytes = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.QuotaSizeBytes |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFlagEtcd(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFlagEtcd
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Flag_Etcd_V3_2) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: flag__etcd__v3_2: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: flag__etcd__v3_2: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field SnapshotCount", wireType)
|
||||||
|
}
|
||||||
|
m.SnapshotCount = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.SnapshotCount |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field QuotaSizeBytes", wireType)
|
||||||
|
}
|
||||||
|
m.QuotaSizeBytes = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.QuotaSizeBytes |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFlagEtcd(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFlagEtcd
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (m *Flag_Etcd_Tip) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: flag__etcd__tip: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: flag__etcd__tip: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field SnapshotCount", wireType)
|
||||||
|
}
|
||||||
|
m.SnapshotCount = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.SnapshotCount |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field QuotaSizeBytes", wireType)
|
||||||
|
}
|
||||||
|
m.QuotaSizeBytes = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.QuotaSizeBytes |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFlagEtcd(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFlagEtcd
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipFlagEtcd(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthFlagEtcd
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagEtcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipFlagEtcd(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthFlagEtcd = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowFlagEtcd = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("dbtesterpb/flag_etcd.proto", fileDescriptorFlagEtcd) }
|
||||||
|
|
||||||
|
var fileDescriptorFlagEtcd = []byte{
|
||||||
|
// 251 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x49, 0x2a, 0x49,
|
||||||
|
0x2d, 0x2e, 0x49, 0x2d, 0x2a, 0x48, 0xd2, 0x4f, 0xcb, 0x49, 0x4c, 0x8f, 0x4f, 0x2d, 0x49, 0x4e,
|
||||||
|
0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0xc8, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64,
|
||||||
|
0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0x95, 0x24, 0x95,
|
||||||
|
0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xaa, 0x14, 0xcc, 0x25, 0x00, 0x36, 0x0d, 0x6c,
|
||||||
|
0x5c, 0x7c, 0x7c, 0x99, 0x51, 0xbc, 0xb1, 0x90, 0x3d, 0x17, 0x6f, 0x70, 0x5e, 0x62, 0x41, 0x71,
|
||||||
|
0x46, 0x7e, 0x89, 0x73, 0x7e, 0x69, 0x5e, 0x89, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xb3, 0x93, 0xe4,
|
||||||
|
0xa7, 0x7b, 0xf2, 0xa2, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0xc5, 0x50, 0xe9, 0xf8, 0x64, 0x90,
|
||||||
|
0xbc, 0x52, 0x10, 0xaa, 0x7a, 0xa5, 0x19, 0x8c, 0x68, 0xa6, 0x1a, 0xc7, 0x1b, 0x52, 0x6c, 0xaa,
|
||||||
|
0x90, 0x33, 0x17, 0x5f, 0x60, 0x69, 0x7e, 0x49, 0x62, 0x70, 0x66, 0x55, 0xaa, 0x53, 0x65, 0x49,
|
||||||
|
0x6a, 0xb1, 0x04, 0x13, 0xd8, 0x04, 0xe9, 0x4f, 0xf7, 0xe4, 0xc5, 0x21, 0x26, 0x14, 0x82, 0xe4,
|
||||||
|
0xe3, 0x8b, 0x33, 0xab, 0x52, 0xe3, 0x93, 0x40, 0x2a, 0x94, 0x82, 0xd0, 0xb4, 0x60, 0x73, 0x9a,
|
||||||
|
0xd1, 0x20, 0x71, 0xda, 0x74, 0x46, 0x2e, 0x7e, 0x64, 0xa7, 0x95, 0x64, 0x16, 0x0c, 0x0e, 0x97,
|
||||||
|
0x39, 0x89, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f,
|
||||||
|
0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x4e, 0x41, 0xc6, 0x80, 0x00, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0x5c, 0xec, 0xd8, 0x68, 0x9a, 0x02, 0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,32 @@
syntax = "proto3";
package dbtesterpb;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

// See https://github.com/coreos/etcd/blob/master/etcdmain/help.go for more.
message flag__etcd__v2_3 {
  int64 SnapshotCount = 1 [(gogoproto.moretags) = "yaml:\"snapshot_count\""];
}

// See https://github.com/coreos/etcd/blob/master/etcdmain/help.go for more.
message flag__etcd__v3_1 {
  int64 SnapshotCount = 1 [(gogoproto.moretags) = "yaml:\"snapshot_count\""];
  int64 QuotaSizeBytes = 2 [(gogoproto.moretags) = "yaml:\"quota_size_bytes\""];
}

// See https://github.com/coreos/etcd/blob/master/etcdmain/help.go for more.
message flag__etcd__v3_2 {
  int64 SnapshotCount = 1 [(gogoproto.moretags) = "yaml:\"snapshot_count\""];
  int64 QuotaSizeBytes = 2 [(gogoproto.moretags) = "yaml:\"quota_size_bytes\""];
}

// See https://github.com/coreos/etcd/blob/master/etcdmain/help.go for more.
message flag__etcd__tip {
  int64 SnapshotCount = 1 [(gogoproto.moretags) = "yaml:\"snapshot_count\""];
  int64 QuotaSizeBytes = 2 [(gogoproto.moretags) = "yaml:\"quota_size_bytes\""];
}
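The gogoproto moretags options above attach yaml struct tags to the generated Go fields (see the Flag_Etcd_V3_1 struct earlier in this diff), so the same message types can be filled directly from a YAML test configuration. A minimal sketch, assuming a generic YAML decoder such as gopkg.in/yaml.v2; this hunk does not show which loader dbtester actually uses:

package main

import (
    "fmt"

    "github.com/coreos/dbtester/dbtesterpb"
    "gopkg.in/yaml.v2"
)

func main() {
    // Keys follow the yaml tags declared above: snapshot_count, quota_size_bytes.
    doc := []byte("snapshot_count: 100000\nquota_size_bytes: 8000000000\n")

    var cfg dbtesterpb.Flag_Etcd_V3_1
    if err := yaml.Unmarshal(doc, &cfg); err != nil {
        panic(err)
    }
    fmt.Println(cfg.SnapshotCount, cfg.QuotaSizeBytes) // 100000 8000000000
}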
@ -0,0 +1,264 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: dbtesterpb/flag_zetcd.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
package dbtesterpb
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
import _ "github.com/gogo/protobuf/gogoproto"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// Flag_Zetcd_Beta is zetcd-specific flags
|
||||||
|
// (https://github.com/coreos/zetcd).
|
||||||
|
type Flag_Zetcd_Beta struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Zetcd_Beta) Reset() { *m = Flag_Zetcd_Beta{} }
|
||||||
|
func (m *Flag_Zetcd_Beta) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Flag_Zetcd_Beta) ProtoMessage() {}
|
||||||
|
func (*Flag_Zetcd_Beta) Descriptor() ([]byte, []int) { return fileDescriptorFlagZetcd, []int{0} }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Flag_Zetcd_Beta)(nil), "dbtesterpb.flag__zetcd__beta")
|
||||||
|
}
|
||||||
|
func (m *Flag_Zetcd_Beta) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Flag_Zetcd_Beta) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64FlagZetcd(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32FlagZetcd(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintFlagZetcd(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *Flag_Zetcd_Beta) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovFlagZetcd(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozFlagZetcd(x uint64) (n int) {
|
||||||
|
return sovFlagZetcd(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *Flag_Zetcd_Beta) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFlagZetcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: flag__zetcd__beta: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: flag__zetcd__beta: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFlagZetcd(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFlagZetcd
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipFlagZetcd(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagZetcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagZetcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagZetcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthFlagZetcd
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFlagZetcd
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipFlagZetcd(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthFlagZetcd = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowFlagZetcd = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("dbtesterpb/flag_zetcd.proto", fileDescriptorFlagZetcd) }
|
||||||
|
|
||||||
|
var fileDescriptorFlagZetcd = []byte{
|
||||||
|
// 130 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x49, 0x2a, 0x49,
|
||||||
|
0x2d, 0x2e, 0x49, 0x2d, 0x2a, 0x48, 0xd2, 0x4f, 0xcb, 0x49, 0x4c, 0x8f, 0xaf, 0x4a, 0x2d, 0x49,
|
||||||
|
0x4e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x48, 0x4a, 0xe9, 0xa6, 0x67, 0x96,
|
||||||
|
0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0x95, 0x24,
|
||||||
|
0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xaa, 0x24, 0xcc, 0x25, 0x08, 0x36, 0x0e,
|
||||||
|
0x62, 0x5e, 0x7c, 0x7c, 0x52, 0x6a, 0x49, 0xa2, 0x93, 0xc8, 0x89, 0x87, 0x72, 0x0c, 0x27, 0x1e,
|
||||||
|
0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49,
|
||||||
|
0x6c, 0x60, 0x1d, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xb7, 0x0d, 0x03, 0x8b, 0x00,
|
||||||
|
0x00, 0x00,
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,14 @@
syntax = "proto3";
package dbtesterpb;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

// Flag_Zetcd_Beta is zetcd-specific flags
// (https://github.com/coreos/zetcd).
message flag__zetcd__beta {
}
File diff suppressed because it is too large
@ -0,0 +1,73 @@
syntax = "proto3";
package dbtesterpb;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;

message flag__zookeeper__r3_4_9 {
  // JavaDJuteMaxBuffer is for '-Djute.maxbuffer' flag.
  // It is the maximum size, in bytes, of a request or response.
  // See http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html#Unsafe+Options for more.
  uint64 JavaDJuteMaxBuffer = 1 [(gogoproto.moretags) = "yaml:\"java_d_jute_max_buffer\""];

  // JavaXms is for '-Xms' flag (minimum Java heap size).
  // See https://docs.oracle.com/cd/E13150_01/jrockit_jvm/jrockit/jrdocs/refman/optionX.html for more.
  string JavaXms = 2 [(gogoproto.moretags) = "yaml:\"java_xms\""];

  // JavaXmx is for '-Xmx' flag (maximum Java heap size).
  // See https://docs.oracle.com/cd/E13150_01/jrockit_jvm/jrockit/jrdocs/refman/optionX.html for more.
  string JavaXmx = 3 [(gogoproto.moretags) = "yaml:\"java_xmx\""];

  // MyID contains a single integer in human readable ASCII text that represents the server id.
  // Each ZooKeeper server has a unique id. This id is used in two places: the myid file and the
  // configuration file. The myid file identifies the server that corresponds to the given data directory.
  // See http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html#sc_dataFileManagement for more.
  uint32 MyID = 100;

  // ClientPort is by default '2181'.
  // No need to set manually. Inherited from 'database_port_to_connect'.
  int64 ClientPort = 101;

  // See http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html for more.
  int64 TickTime = 102 [(gogoproto.moretags) = "yaml:\"tick_time\""];
  int64 InitLimit = 103 [(gogoproto.moretags) = "yaml:\"init_limit\""];
  int64 SyncLimit = 104 [(gogoproto.moretags) = "yaml:\"sync_limit\""];
  int64 SnapCount = 105 [(gogoproto.moretags) = "yaml:\"snap_count\""];
  int64 MaxClientConnections = 106 [(gogoproto.moretags) = "yaml:\"max_client_connections\""];
}

message flag__zookeeper__r3_5_2_alpha {
  // JavaDJuteMaxBuffer is for '-Djute.maxbuffer' flag.
  // It is the maximum size, in bytes, of a request or response.
  // See http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html#Unsafe+Options for more.
  uint64 JavaDJuteMaxBuffer = 1 [(gogoproto.moretags) = "yaml:\"java_d_jute_max_buffer\""];

  // JavaXms is for '-Xms' flag (minimum Java heap size).
  // See https://docs.oracle.com/cd/E13150_01/jrockit_jvm/jrockit/jrdocs/refman/optionX.html for more.
  string JavaXms = 2 [(gogoproto.moretags) = "yaml:\"java_xms\""];

  // JavaXmx is for '-Xmx' flag (maximum Java heap size).
  // See https://docs.oracle.com/cd/E13150_01/jrockit_jvm/jrockit/jrdocs/refman/optionX.html for more.
  string JavaXmx = 3 [(gogoproto.moretags) = "yaml:\"java_xmx\""];

  // MyID contains a single integer in human readable ASCII text that represents the server id.
  // Each ZooKeeper server has a unique id. This id is used in two places: the myid file and the
  // configuration file. The myid file identifies the server that corresponds to the given data directory.
  // See http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html#sc_dataFileManagement for more.
  uint32 MyID = 100;

  // ClientPort is by default '2181'.
  // No need to set manually. Inherited from 'database_port_to_connect'.
  int64 ClientPort = 101;

  // See http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html for more.
  int64 TickTime = 102 [(gogoproto.moretags) = "yaml:\"tick_time\""];
  int64 InitLimit = 103 [(gogoproto.moretags) = "yaml:\"init_limit\""];
  int64 SyncLimit = 104 [(gogoproto.moretags) = "yaml:\"sync_limit\""];
  int64 SnapCount = 105 [(gogoproto.moretags) = "yaml:\"snap_count\""];
  int64 MaxClientConnections = 106 [(gogoproto.moretags) = "yaml:\"max_client_connections\""];
}
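The ZooKeeper flag fields above mostly mirror zoo.cfg keys (tickTime, initLimit, syncLimit, clientPort, maxClientCnxns), while MyID maps to the separate myid file. A hypothetical rendering helper, only to illustrate that mapping; the agent's actual config writer is not part of this hunk, and the Go type name Flag_Zookeeper_R3_4_9 is assumed from the naming pattern of the other generated flag types (the zookeeper .pb.go diff is suppressed above).

package main

import (
    "fmt"

    "github.com/coreos/dbtester/dbtesterpb"
)

// zooCfg renders zoo.cfg-style keys from the flag message fields. dataDir is
// an illustrative extra input, not a field of the message.
func zooCfg(f *dbtesterpb.Flag_Zookeeper_R3_4_9, dataDir string) string {
    return fmt.Sprintf(
        "tickTime=%d\ndataDir=%s\nclientPort=%d\ninitLimit=%d\nsyncLimit=%d\nmaxClientCnxns=%d\n",
        f.TickTime, dataDir, f.ClientPort, f.InitLimit, f.SyncLimit, f.MaxClientConnections,
    )
}

func main() {
    fmt.Print(zooCfg(&dbtesterpb.Flag_Zookeeper_R3_4_9{
        TickTime:             2000,
        ClientPort:           2181,
        InitLimit:            5,
        SyncLimit:            5,
        MaxClientConnections: 5000,
    }, "/var/lib/zookeeper"))
}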
File diff suppressed because it is too large
@ -3,6 +3,16 @@ package dbtesterpb;
 
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+import "dbtesterpb/database_id.proto";
+
+import "dbtesterpb/flag_etcd.proto";
+import "dbtesterpb/flag_zookeeper.proto";
+import "dbtesterpb/flag_consul.proto";
+import "dbtesterpb/flag_zetcd.proto";
+import "dbtesterpb/flag_cetcd.proto";
+
+import "dbtesterpb/config_client_machine.proto";
 
 option (gogoproto.marshaler_all) = true;
 option (gogoproto.sizer_all) = true;
 option (gogoproto.unmarshaler_all) = true;
@ -12,59 +22,45 @@ service Transporter {
|
||||||
rpc Transfer(Request) returns (Response) {}
|
rpc Transfer(Request) returns (Response) {}
|
||||||
}
|
}
|
||||||
|
|
||||||
message Request {
|
|
||||||
enum Operation {
|
enum Operation {
|
||||||
Start = 0;
|
Start = 0;
|
||||||
Stop = 1;
|
Stop = 1;
|
||||||
Heartbeat = 2;
|
Heartbeat = 2;
|
||||||
}
|
}
|
||||||
enum Database {
|
|
||||||
etcdv2 = 0;
|
|
||||||
etcdv3 = 1;
|
|
||||||
zookeeper = 2;
|
|
||||||
consul = 3;
|
|
||||||
zetcd = 4;
|
|
||||||
cetcd = 5;
|
|
||||||
}
|
|
||||||
message Control {
|
|
||||||
string googleCloudProjectName = 1;
|
|
||||||
string googleCloudStorageKey = 2;
|
|
||||||
string googleCloudStorageBucketName = 3;
|
|
||||||
string googleCloudStorageSubDirectory = 4;
|
|
||||||
}
|
|
||||||
message Etcdv3 {
|
|
||||||
int64 snapCount = 1;
|
|
||||||
int64 quotaSizeBytes = 2;
|
|
||||||
}
|
|
||||||
message Zookeeper {
|
|
||||||
uint32 myID = 1;
|
|
||||||
int64 tickTime = 2;
|
|
||||||
int64 clientPort = 3;
|
|
||||||
int64 initLimit = 4;
|
|
||||||
int64 syncLimit = 5;
|
|
||||||
int64 snapCount = 6;
|
|
||||||
int64 maxClientConnections = 7;
|
|
||||||
}
|
|
||||||
|
|
||||||
Operation operation = 1;
|
message Request {
|
||||||
bool triggerLogUpload = 2;
|
Operation Operation = 1;
|
||||||
Database databaseID = 3;
|
bool TriggerLogUpload = 2;
|
||||||
string databaseTag = 4;
|
|
||||||
|
|
||||||
string peerIPsString = 5;
|
DatabaseID DatabaseID = 3;
|
||||||
uint32 ipIndex = 6;
|
string DatabaseTag = 4;
|
||||||
|
|
||||||
int64 currentClientNumber = 7;
|
string PeerIPsString = 5;
|
||||||
|
uint32 IPIndex = 6;
|
||||||
|
|
||||||
Control control = 8;
|
int64 CurrentClientNumber = 7;
|
||||||
Etcdv3 etcdv3Config = 9;
|
|
||||||
Zookeeper zookeeperConfig = 10;
|
ConfigClientMachineInitial ConfigClientMachineInitial = 8;
|
||||||
|
|
||||||
|
flag__etcd__v2_3 flag__etcd__v2_3 = 100;
|
||||||
|
flag__etcd__v3_1 flag__etcd__v3_1 = 101;
|
||||||
|
flag__etcd__v3_2 flag__etcd__v3_2 = 102;
|
||||||
|
flag__etcd__tip flag__etcd__tip = 103;
|
||||||
|
|
||||||
|
flag__zookeeper__r3_4_9 flag__zookeeper__r3_4_9 = 200;
|
||||||
|
flag__zookeeper__r3_5_2_alpha flag__zookeeper__r3_5_2_alpha = 201;
|
||||||
|
|
||||||
|
flag__consul__v0_7_5 flag__consul__v0_7_5 = 300;
|
||||||
|
flag__consul__v0_8_0 flag__consul__v0_8_0 = 301;
|
||||||
|
|
||||||
|
flag__cetcd__beta flag__cetcd__beta = 400;
|
||||||
|
flag__zetcd__beta flag__zetcd__beta = 500;
|
||||||
}
|
}
|
||||||
|
|
||||||
message Response {
|
message Response {
|
||||||
bool success = 1;
|
bool Success = 1;
|
||||||
|
|
||||||
// DiskSpaceUsageBytes is the data size of the database on disk in bytes.
|
// DiskSpaceUsageBytes is the data size of the database on disk in bytes.
|
||||||
// It measures after database is requested to stop.
|
// It measures after database is requested to stop.
|
||||||
int64 diskSpaceUsageBytes = 2;
|
int64 DiskSpaceUsageBytes = 2;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
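After this change a single Request message carries the operation, the database identity, and one flag__* block per supported database, instead of the old nested Control/Etcdv3/Zookeeper messages. A minimal sketch of filling the flattened fields from the control side, assuming the generated dbtesterpb package from this change; the DatabaseID enum value name and the "___" peer-IP separator are assumptions chosen to match the rest of the tool:

package main

import (
	"fmt"
	"strings"

	"github.com/coreos/dbtester/dbtesterpb"
)

func main() {
	// Peer IPs taken from the sample configuration later in this change.
	peerIPs := []string{"10.240.0.7", "10.240.0.8", "10.240.0.12"}

	req := &dbtesterpb.Request{
		Operation:           dbtesterpb.Operation_Start,
		TriggerLogUpload:    false,
		DatabaseID:          dbtesterpb.DatabaseID_etcd__tip, // assumed enum value name
		DatabaseTag:         "etcd-tip-go1.8.0",
		PeerIPsString:       strings.Join(peerIPs, "___"), // assumed separator
		IPIndex:             0,
		CurrentClientNumber: 1,
	}
	fmt.Printf("%+v\n", req)
}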
@ -0,0 +1,93 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dbtesterpb

import (
	"image/color"

	"github.com/gonum/plot/plotutil"
)

// IsValidDatabaseID returns false if the database id is not supported.
func IsValidDatabaseID(id string) bool {
	_, ok := DatabaseID_value[id]
	return ok
}

func GetRGBI(databaseID string, i int) color.Color {
	switch databaseID {
	case "etcd__v2_3":
		return color.RGBA{218, 97, 229, 255} // purple
	case "etcd__v3_1":
		return color.RGBA{24, 90, 169, 255} // blue
	case "etcd__v3_2":
		return color.RGBA{63, 81, 181, 255} // indigo
	case "etcd__tip":
		return color.RGBA{0, 229, 255, 255} // cyan
	case "zookeeper__r3_5_2_alpha":
		return color.RGBA{38, 169, 24, 255} // green
	case "consul__v0_7_5":
		return color.RGBA{198, 53, 53, 255} // red
	case "zetcd__beta":
		return color.RGBA{251, 206, 0, 255} // yellow
	case "cetcd__beta":
		return color.RGBA{205, 220, 57, 255} // lime
	}
	return plotutil.Color(i)
}

func GetRGBII(databaseID string, i int) color.Color {
	switch databaseID {
	case "etcd__v2_3":
		return color.RGBA{229, 212, 231, 255} // light-purple
	case "etcd__v3_1":
		return color.RGBA{129, 212, 247, 255} // light-blue
	case "etcd__v3_2":
		return color.RGBA{159, 168, 218, 255} // light-indigo
	case "etcd__tip":
		return color.RGBA{132, 255, 255, 255} // light-cyan
	case "zookeeper__r3_5_2_alpha":
		return color.RGBA{129, 247, 152, 255} // light-green
	case "consul__v0_7_5":
		return color.RGBA{247, 156, 156, 255} // light-red
	case "zetcd__beta":
		return color.RGBA{245, 247, 166, 255} // light-yellow
	case "cetcd__beta":
		return color.RGBA{238, 255, 65, 255} // light-lime
	}
	return plotutil.Color(i)
}

func GetRGBIII(databaseID string, i int) color.Color {
	switch databaseID {
	case "etcd__v2_3":
		return color.RGBA{165, 8, 180, 255} // deep-purple
	case "etcd__v3_1":
		return color.RGBA{37, 29, 191, 255} // deep-blue
	case "etcd__v3_2":
		return color.RGBA{26, 35, 126, 255} // deep-indigo
	case "etcd__tip":
		return color.RGBA{0, 96, 100, 255} // deep-cyan
	case "zookeeper__r3_5_2_alpha":
		return color.RGBA{7, 64, 35, 255} // deep-green
	case "consul__v0_7_5":
		return color.RGBA{212, 8, 46, 255} // deep-red
	case "zetcd__beta":
		return color.RGBA{229, 255, 0, 255} // deep-yellow
	case "cetcd__beta":
		return color.RGBA{205, 220, 57, 255} // deep-lime
	}
	return plotutil.Color(i)
}
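These helpers pair each database ID with three shades (base, light, deep) so related series of one metric can share a hue in the plots, while unknown IDs fall back to the gonum plotutil palette at position i. A minimal usage sketch, assuming the dbtesterpb package from this change is importable:

package main

import (
	"fmt"

	"github.com/coreos/dbtester/dbtesterpb"
)

func main() {
	ids := []string{"etcd__tip", "zookeeper__r3_5_2_alpha", "consul__v0_7_5"}
	for i, id := range ids {
		if !dbtesterpb.IsValidDatabaseID(id) {
			fmt.Printf("skipping unsupported database ID %q\n", id)
			continue
		}
		// Base, light, and deep shades for the same database,
		// e.g. for the average, min, and max series of one metric.
		base := dbtesterpb.GetRGBI(id, i)
		light := dbtesterpb.GetRGBII(id, i)
		deep := dbtesterpb.GetRGBIII(id, i)
		fmt.Println(id, base, light, deep)
	}
}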
@ -1,5 +1,5 @@
-hash: d814a9bd4d998e7b38f52abf3bf6284b4ce1b9e0d1bbb3aecfcedb9dcbf9de16
+hash: fd49652adb4ec58cdca302dab916ff1d3bb80ccdf078dff45579becd0e75d613
-updated: 2017-02-15T11:01:56.215783528-08:00
+updated: 2017-02-22T19:06:28.732861827-08:00
 imports:
 - name: bitbucket.org/zombiezen/gopdf
   version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
@ -21,7 +21,7 @@ imports:
 - name: github.com/cheggaaa/pb
   version: d7e6ca3010b6f084d8056847f55d7f572f180678
 - name: github.com/coreos/etcd
-  version: 2510a1488c51eb503c396c7e4e44ee910e4857a6
+  version: 86c9bf5c3f68707048e2cea63101e0d8d156331d
   subpackages:
   - auth/authpb
   - client
@ -76,7 +76,7 @@ imports:
   subpackages:
   - asm/f64
 - name: github.com/gonum/plot
-  version: 0a603a9b2d3cc81f4736b30c5155495fb900700d
+  version: e6157b766208b18c1fdb8ba52f1c6eb75b650314
   subpackages:
   - palette
   - plotter
@ -103,13 +103,13 @@ imports:
   subpackages:
   - schema
 - name: github.com/hashicorp/consul
-  version: 3da73be55c82a7f88f1dfd3ec16d267970ac8ff0
+  version: 65b7ed462cc574d57dc6dcc9bca74676089f7fee
   subpackages:
   - api
 - name: github.com/hashicorp/go-cleanhttp
   version: 3573b8b52aa7b37b9358d966a898feb387f62437
 - name: github.com/hashicorp/serf
-  version: cb45b412ee4f9d6cc2eeb2b2b7dd0f6cfd7545c1
+  version: 050c56d31a1ff2ebc50e3f4810fdff7e7563f6c8
   subpackages:
   - coordinate
 - name: github.com/inconshreveable/mousetrap
@ -138,7 +138,7 @@ imports:
   subpackages:
   - codec
 - name: golang.org/x/image
-  version: 306b8294319cca33073246c5f75e5142f54f4cca
+  version: b952c941a68f1a00c1f8d855a12a8282d2f1b4f0
   subpackages:
   - draw
   - font
@ -180,7 +180,7 @@ imports:
   - storage/v1
   - transport
 - name: google.golang.org/appengine
-  version: 2e4a801b39fc199db615bfca7d0b9f8cd9580599
+  version: 3a452f9e00122ead39586d68ffdb9c6e1326af3c
   subpackages:
   - internal
   - internal/app_identity
@ -9,7 +9,7 @@ import:
 - package: github.com/cheggaaa/pb
   version: d7e6ca3010b6f084d8056847f55d7f572f180678
 - package: github.com/coreos/etcd
-  version: 2510a1488c51eb503c396c7e4e44ee910e4857a6
+  version: 86c9bf5c3f68707048e2cea63101e0d8d156331d
   subpackages:
   - auth/authpb
   - client
@ -41,7 +41,7 @@ import:
   - jsonpb
   - proto
 - package: github.com/gonum/plot
-  version: 0a603a9b2d3cc81f4736b30c5155495fb900700d
+  version: e6157b766208b18c1fdb8ba52f1c6eb75b650314
   subpackages:
   - palette
   - plotter
@ -58,7 +58,7 @@ import:
 - package: github.com/gyuho/psn
   version: 0a50f90209cfd81ef2a20cbf01679f118f31f0ad
 - package: github.com/hashicorp/consul
-  version: 3da73be55c82a7f88f1dfd3ec16d267970ac8ff0
+  version: 65b7ed462cc574d57dc6dcc9bca74676089f7fee
   subpackages:
   - api
 - package: github.com/samuel/go-zookeeper
readme.go (18 lines changed)
@ -1,3 +1,17 @@
+// Copyright 2017 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package dbtester

 import (
@ -8,7 +22,7 @@ import (

 // WriteREADME writes README.
 func (cfg *Config) WriteREADME(summary string) error {
-	plog.Printf("writing README at %q", cfg.README.OutputPath)
+	plog.Printf("writing README at %q", cfg.ConfigAnalyzeMachineREADME.OutputPath)

 	buf := new(bytes.Buffer)
 	buf.WriteString("\n\n")
@ -33,5 +47,5 @@ func (cfg *Config) WriteREADME(summary string) error {
 		buf.WriteString("\n\n")
 	}

-	return toFile(buf.String(), cfg.README.OutputPath)
+	return toFile(buf.String(), cfg.ConfigAnalyzeMachineREADME.OutputPath)
 }
@ -20,6 +20,7 @@ import (
 	"time"

 	"github.com/cheggaaa/pb"
+	"github.com/coreos/dbtester/dbtesterpb"
 	"github.com/coreos/etcd/pkg/report"
 	"golang.org/x/net/context"
 )
@ -135,8 +136,8 @@ func printStats(st report.Stats) {
 	}
 }

-func (cfg *Config) generateReport(gcfg TestGroup, h []ReqHandler, reqDone func(), reqGen func(chan<- request)) {
-	b := newBenchmark(gcfg.RequestNumber, gcfg.ClientNumber, h, reqDone, reqGen)
+func (cfg *Config) generateReport(gcfg dbtesterpb.ConfigClientMachineAgentControl, h []ReqHandler, reqDone func(), reqGen func(chan<- request)) {
+	b := newBenchmark(gcfg.ConfigClientMachineBenchmarkOptions.RequestNumber, gcfg.ConfigClientMachineBenchmarkOptions.ClientNumber, h, reqDone, reqGen)
 	b.startRequests()
 	b.waitAll()
@ -38,7 +38,7 @@ var DiskSpaceUsageSummaryColumns = []string{

 // SaveDiskSpaceUsageSummary saves data size summary.
 func (cfg *Config) SaveDiskSpaceUsageSummary(databaseID string, idxToResponse map[int]dbtesterpb.Response) error {
-	gcfg, ok := cfg.DatabaseIDToTestGroup[databaseID]
+	gcfg, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
 	if !ok {
 		return fmt.Errorf("%q does not exist", databaseID)
 	}
@ -68,7 +68,7 @@ func (cfg *Config) SaveDiskSpaceUsageSummary(databaseID string, idxToResponse ma
 		return err
 	}

-	return fr.CSV(cfg.Control.ServerDiskSpaceUsageSummaryPath)
+	return fr.CSV(cfg.ConfigClientMachineInitial.ServerDiskSpaceUsageSummaryPath)
 }

 func (cfg *Config) saveDataLatencyDistributionSummary(st report.Stats) {
@ -126,7 +126,7 @@ func (cfg *Config) saveDataLatencyDistributionSummary(st report.Stats) {
 		}
 	}

-	if err := fr.CSVHorizontal(cfg.Control.ClientLatencyDistributionSummaryPath); err != nil {
+	if err := fr.CSVHorizontal(cfg.ConfigClientMachineInitial.ClientLatencyDistributionSummaryPath); err != nil {
 		plog.Fatal(err)
 	}
 }
@ -152,7 +152,7 @@ func (cfg *Config) saveDataLatencyDistributionPercentile(st report.Stats) {
 	if err := fr.AddColumn(c2); err != nil {
 		plog.Fatal(err)
 	}
-	if err := fr.CSV(cfg.Control.ClientLatencyDistributionPercentilePath); err != nil {
+	if err := fr.CSV(cfg.ConfigClientMachineInitial.ClientLatencyDistributionPercentilePath); err != nil {
 		plog.Fatal(err)
 	}
 }
@ -205,16 +205,16 @@ func (cfg *Config) saveDataLatencyDistributionAll(st report.Stats) {
 	if err := fr.AddColumn(c2); err != nil {
 		plog.Fatal(err)
 	}
-	if err := fr.CSV(cfg.Control.ClientLatencyDistributionAllPath); err != nil {
+	if err := fr.CSV(cfg.ConfigClientMachineInitial.ClientLatencyDistributionAllPath); err != nil {
 		plog.Fatal(err)
 	}
 }

-func (cfg *Config) saveDataLatencyThroughputTimeseries(gcfg TestGroup, st report.Stats, clientNs []int64) {
-	if len(clientNs) == 0 && len(gcfg.ConnectionClientNumbers) == 0 {
+func (cfg *Config) saveDataLatencyThroughputTimeseries(gcfg dbtesterpb.ConfigClientMachineAgentControl, st report.Stats, clientNs []int64) {
+	if len(clientNs) == 0 && len(gcfg.ConfigClientMachineBenchmarkOptions.ConnectionClientNumbers) == 0 {
 		clientNs = make([]int64, len(st.TimeSeries))
 		for i := range clientNs {
-			clientNs[i] = gcfg.BenchmarkOptions.ClientNumber
+			clientNs[i] = gcfg.ConfigClientMachineBenchmarkOptions.ClientNumber
 		}
 	}
 	c1 := dataframe.NewColumn("UNIX-SECOND")
@ -253,12 +253,12 @@ func (cfg *Config) saveDataLatencyThroughputTimeseries(gcfg TestGroup, st report
 		plog.Fatal(err)
 	}

-	if err := fr.CSV(cfg.Control.ClientLatencyThroughputTimeseriesPath); err != nil {
+	if err := fr.CSV(cfg.ConfigClientMachineInitial.ClientLatencyThroughputTimeseriesPath); err != nil {
 		plog.Fatal(err)
 	}

 	// aggregate latency by the number of keys
-	tss := FindRangesLatency(st.TimeSeries, 1000, gcfg.RequestNumber)
+	tss := FindRangesLatency(st.TimeSeries, 1000, gcfg.ConfigClientMachineBenchmarkOptions.RequestNumber)
 	ctt1 := dataframe.NewColumn("KEYS")
 	ctt2 := dataframe.NewColumn("MIN-LATENCY-MS")
 	ctt3 := dataframe.NewColumn("AVG-LATENCY-MS")
@ -284,12 +284,12 @@ func (cfg *Config) saveDataLatencyThroughputTimeseries(gcfg TestGroup, st report
 		plog.Fatal(err)
 	}

-	if err := frr.CSV(cfg.Control.ClientLatencyByKeyNumberPath); err != nil {
+	if err := frr.CSV(cfg.ConfigClientMachineInitial.ClientLatencyByKeyNumberPath); err != nil {
 		plog.Fatal(err)
 	}
 }

-func (cfg *Config) saveAllStats(gcfg TestGroup, stats report.Stats, clientNs []int64) {
+func (cfg *Config) saveAllStats(gcfg dbtesterpb.ConfigClientMachineAgentControl, stats report.Stats, clientNs []int64) {
 	cfg.saveDataLatencyDistributionSummary(stats)
 	cfg.saveDataLatencyDistributionPercentile(stats)
 	cfg.saveDataLatencyDistributionAll(stats)
@ -298,14 +298,14 @@ func (cfg *Config) saveAllStats(gcfg TestGroup, stats report.Stats, clientNs []i

 // UploadToGoogle uploads target file to Google Cloud Storage.
 func (cfg *Config) UploadToGoogle(databaseID string, targetPath string) error {
-	gcfg, ok := cfg.DatabaseIDToTestGroup[databaseID]
+	gcfg, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
 	if !ok {
 		return fmt.Errorf("%q does not exist", databaseID)
 	}
 	if !exist(targetPath) {
 		return fmt.Errorf("%q does not exist", targetPath)
 	}
-	u, err := remotestorage.NewGoogleCloudStorage([]byte(cfg.Control.GoogleCloudStorageKey), cfg.Control.GoogleCloudProjectName)
+	u, err := remotestorage.NewGoogleCloudStorage([]byte(cfg.ConfigClientMachineInitial.GoogleCloudStorageKey), cfg.ConfigClientMachineInitial.GoogleCloudProjectName)
 	if err != nil {
 		return err
 	}
@ -315,11 +315,11 @@ func (cfg *Config) UploadToGoogle(databaseID string, targetPath string) error {
 	if !strings.HasPrefix(dstPath, gcfg.DatabaseTag) {
 		dstPath = fmt.Sprintf("%s-%s", gcfg.DatabaseTag, dstPath)
 	}
-	dstPath = filepath.Join(cfg.Control.GoogleCloudStorageSubDirectory, dstPath)
+	dstPath = filepath.Join(cfg.ConfigClientMachineInitial.GoogleCloudStorageSubDirectory, dstPath)

 	var uerr error
 	for k := 0; k < 30; k++ {
-		if uerr = u.UploadFile(cfg.Control.GoogleCloudStorageBucketName, srcPath, dstPath); uerr != nil {
+		if uerr = u.UploadFile(cfg.ConfigClientMachineInitial.GoogleCloudStorageBucketName, srcPath, dstPath); uerr != nil {
 			plog.Printf("#%d: error %v while uploading %q", k, uerr, targetPath)
 			time.Sleep(2 * time.Second)
 			continue
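UploadToGoogle retries the upload up to 30 times with a fixed two-second pause between attempts. A standalone sketch of that retry shape, with the uploader reduced to a hypothetical stub so the loop can be exercised without Google Cloud credentials:

package main

import (
	"fmt"
	"time"
)

// uploadOnce is a hypothetical stand-in for the remote-storage UploadFile
// call; it fails the first two attempts to exercise the retry path.
func uploadOnce(attempt int) error {
	if attempt < 2 {
		return fmt.Errorf("transient failure on attempt %d", attempt)
	}
	return nil
}

func main() {
	var uerr error
	for k := 0; k < 30; k++ {
		if uerr = uploadOnce(k); uerr != nil {
			fmt.Printf("#%d: error %v while uploading\n", k, uerr)
			time.Sleep(2 * time.Second)
			continue
		}
		break
	}
	if uerr != nil {
		fmt.Println("upload failed after retries:", uerr)
		return
	}
	fmt.Println("upload succeeded")
}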
118
stress.go
118
stress.go
|
|
@ -36,8 +36,8 @@ type values struct {
|
||||||
sampleSize int
|
sampleSize int
|
||||||
}
|
}
|
||||||
|
|
||||||
func newValues(gcfg TestGroup) (v values, rerr error) {
|
func newValues(gcfg dbtesterpb.ConfigClientMachineAgentControl) (v values, rerr error) {
|
||||||
v.bytes = [][]byte{randBytes(gcfg.BenchmarkOptions.ValueSizeBytes)}
|
v.bytes = [][]byte{randBytes(gcfg.ConfigClientMachineBenchmarkOptions.ValueSizeBytes)}
|
||||||
v.strings = []string{string(v.bytes[0])}
|
v.strings = []string{string(v.bytes[0])}
|
||||||
v.sampleSize = 1
|
v.sampleSize = 1
|
||||||
return
|
return
|
||||||
|
|
@ -45,7 +45,7 @@ func newValues(gcfg TestGroup) (v values, rerr error) {
|
||||||
|
|
||||||
// Stress stresses the database.
|
// Stress stresses the database.
|
||||||
func (cfg *Config) Stress(databaseID string) error {
|
func (cfg *Config) Stress(databaseID string) error {
|
||||||
gcfg, ok := cfg.DatabaseIDToTestGroup[databaseID]
|
gcfg, ok := cfg.DatabaseIDToConfigClientMachineAgentControl[databaseID]
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("%q does not exist", databaseID)
|
return fmt.Errorf("%q does not exist", databaseID)
|
||||||
}
|
}
|
||||||
|
|
@ -55,40 +55,40 @@ func (cfg *Config) Stress(databaseID string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
switch gcfg.BenchmarkOptions.Type {
|
switch gcfg.ConfigClientMachineBenchmarkOptions.Type {
|
||||||
case "write":
|
case "write":
|
||||||
plog.Println("write generateReport is started...")
|
plog.Println("write generateReport is started...")
|
||||||
|
|
||||||
// fixed number of client numbers
|
// fixed number of client numbers
|
||||||
if len(gcfg.BenchmarkOptions.ConnectionClientNumbers) == 0 {
|
if len(gcfg.ConfigClientMachineBenchmarkOptions.ConnectionClientNumbers) == 0 {
|
||||||
h, done := newWriteHandlers(gcfg)
|
h, done := newWriteHandlers(gcfg)
|
||||||
reqGen := func(inflightReqs chan<- request) { generateWrites(gcfg, 0, vals, inflightReqs) }
|
reqGen := func(inflightReqs chan<- request) { generateWrites(gcfg, 0, vals, inflightReqs) }
|
||||||
cfg.generateReport(gcfg, h, done, reqGen)
|
cfg.generateReport(gcfg, h, done, reqGen)
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
// variable client numbers
|
// variable client numbers
|
||||||
rs := assignRequest(gcfg.BenchmarkOptions.ConnectionClientNumbers, gcfg.BenchmarkOptions.RequestNumber)
|
rs := assignRequest(gcfg.ConfigClientMachineBenchmarkOptions.ConnectionClientNumbers, gcfg.ConfigClientMachineBenchmarkOptions.RequestNumber)
|
||||||
|
|
||||||
var stats []report.Stats
|
var stats []report.Stats
|
||||||
reqCompleted := int64(0)
|
reqCompleted := int64(0)
|
||||||
for i := 0; i < len(rs); i++ {
|
for i := 0; i < len(rs); i++ {
|
||||||
copied := gcfg
|
copied := gcfg
|
||||||
copied.BenchmarkOptions.ConnectionNumber = gcfg.BenchmarkOptions.ConnectionClientNumbers[i]
|
copied.ConfigClientMachineBenchmarkOptions.ConnectionNumber = gcfg.ConfigClientMachineBenchmarkOptions.ConnectionClientNumbers[i]
|
||||||
copied.BenchmarkOptions.ClientNumber = gcfg.BenchmarkOptions.ConnectionClientNumbers[i]
|
copied.ConfigClientMachineBenchmarkOptions.ClientNumber = gcfg.ConfigClientMachineBenchmarkOptions.ConnectionClientNumbers[i]
|
||||||
copied.BenchmarkOptions.RequestNumber = rs[i]
|
copied.ConfigClientMachineBenchmarkOptions.RequestNumber = rs[i]
|
||||||
ncfg := *cfg
|
ncfg := *cfg
|
||||||
ncfg.DatabaseIDToTestGroup[databaseID] = copied
|
ncfg.DatabaseIDToConfigClientMachineAgentControl[databaseID] = copied
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
plog.Infof("signaling agent with client number %d", copied.BenchmarkOptions.ClientNumber)
|
plog.Infof("signaling agent with client number %d", copied.ConfigClientMachineBenchmarkOptions.ClientNumber)
|
||||||
if _, err := (&ncfg).BroadcaseRequest(databaseID, dbtesterpb.Request_Heartbeat); err != nil {
|
if _, err := (&ncfg).BroadcaseRequest(databaseID, dbtesterpb.Operation_Heartbeat); err != nil {
|
||||||
plog.Panic(err)
|
plog.Panic(err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
h, done := newWriteHandlers(copied)
|
h, done := newWriteHandlers(copied)
|
||||||
reqGen := func(inflightReqs chan<- request) { generateWrites(copied, reqCompleted, vals, inflightReqs) }
|
reqGen := func(inflightReqs chan<- request) { generateWrites(copied, reqCompleted, vals, inflightReqs) }
|
||||||
b := newBenchmark(copied.BenchmarkOptions.RequestNumber, copied.BenchmarkOptions.ClientNumber, h, done, reqGen)
|
b := newBenchmark(copied.ConfigClientMachineBenchmarkOptions.RequestNumber, copied.ConfigClientMachineBenchmarkOptions.ClientNumber, h, done, reqGen)
|
||||||
|
|
||||||
// wait until rs[i] requests are finished
|
// wait until rs[i] requests are finished
|
||||||
// do not end reports yet
|
// do not end reports yet
|
||||||
|
|
@ -106,7 +106,7 @@ func (cfg *Config) Stress(databaseID string) error {
|
||||||
plog.Info("combining all reports")
|
plog.Info("combining all reports")
|
||||||
|
|
||||||
combined := report.Stats{ErrorDist: make(map[string]int)}
|
combined := report.Stats{ErrorDist: make(map[string]int)}
|
||||||
combinedClientNumber := make([]int64, 0, gcfg.BenchmarkOptions.RequestNumber)
|
combinedClientNumber := make([]int64, 0, gcfg.ConfigClientMachineBenchmarkOptions.RequestNumber)
|
||||||
for i, st := range stats {
|
for i, st := range stats {
|
||||||
combined.AvgTotal += st.AvgTotal
|
combined.AvgTotal += st.AvgTotal
|
||||||
combined.Total += st.Total
|
combined.Total += st.Total
|
||||||
|
|
@ -128,7 +128,7 @@ func (cfg *Config) Stress(databaseID string) error {
|
||||||
// So now we have two duplicate unix time seconds.
|
// So now we have two duplicate unix time seconds.
|
||||||
// This will be handled in aggregating by keys.
|
// This will be handled in aggregating by keys.
|
||||||
//
|
//
|
||||||
clientN := gcfg.BenchmarkOptions.ConnectionClientNumbers[i]
|
clientN := gcfg.ConfigClientMachineBenchmarkOptions.ConnectionClientNumbers[i]
|
||||||
clientNs := make([]int64, len(st.TimeSeries))
|
clientNs := make([]int64, len(st.TimeSeries))
|
||||||
for i := range st.TimeSeries {
|
for i := range st.TimeSeries {
|
||||||
clientNs[i] = clientN
|
clientNs[i] = clientN
|
||||||
|
|
@ -184,18 +184,18 @@ func (cfg *Config) Stress(databaseID string) error {
|
||||||
}
|
}
|
||||||
for k, v := range totalKeysFunc(gcfg.DatabaseEndpoints) {
|
for k, v := range totalKeysFunc(gcfg.DatabaseEndpoints) {
|
||||||
plog.Infof("expected write total results [expected_total: %d | database: %q | endpoint: %q | number_of_keys: %d]",
|
plog.Infof("expected write total results [expected_total: %d | database: %q | endpoint: %q | number_of_keys: %d]",
|
||||||
gcfg.BenchmarkOptions.RequestNumber, gcfg.DatabaseID, k, v)
|
gcfg.ConfigClientMachineBenchmarkOptions.RequestNumber, gcfg.DatabaseID, k, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
case "read":
|
case "read":
|
||||||
key, value := sameKey(gcfg.BenchmarkOptions.KeySizeBytes), vals.strings[0]
|
key, value := sameKey(gcfg.ConfigClientMachineBenchmarkOptions.KeySizeBytes), vals.strings[0]
|
||||||
|
|
||||||
switch gcfg.DatabaseID {
|
switch gcfg.DatabaseID {
|
||||||
case "etcdv2":
|
case "etcdv2":
|
||||||
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, gcfg.DatabaseID)
|
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, gcfg.DatabaseID)
|
||||||
var err error
|
var err error
|
||||||
for i := 0; i < 7; i++ {
|
for i := 0; i < 7; i++ {
|
||||||
clients := mustCreateClientsEtcdv2(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
clients := mustCreateClientsEtcdv2(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
_, err = clients[0].Set(context.Background(), key, value, nil)
|
_, err = clients[0].Set(context.Background(), key, value, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
|
|
@ -232,7 +232,7 @@ func (cfg *Config) Stress(databaseID string) error {
|
||||||
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, gcfg.DatabaseID)
|
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, gcfg.DatabaseID)
|
||||||
var err error
|
var err error
|
||||||
for i := 0; i < 7; i++ {
|
for i := 0; i < 7; i++ {
|
||||||
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
_, err = conns[0].Create("/"+key, vals.bytes[0], zkCreateFlags, zkCreateACL)
|
_, err = conns[0].Create("/"+key, vals.bytes[0], zkCreateFlags, zkCreateACL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
|
|
@ -252,7 +252,7 @@ func (cfg *Config) Stress(databaseID string) error {
|
||||||
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, gcfg.DatabaseID)
|
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, gcfg.DatabaseID)
|
||||||
var err error
|
var err error
|
||||||
for i := 0; i < 7; i++ {
|
for i := 0; i < 7; i++ {
|
||||||
clients := mustCreateConnsConsul(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
clients := mustCreateConnsConsul(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
_, err = clients[0].Put(&consulapi.KVPair{Key: key, Value: vals.bytes[0]}, nil)
|
_, err = clients[0].Put(&consulapi.KVPair{Key: key, Value: vals.bytes[0]}, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
|
|
@ -272,7 +272,7 @@ func (cfg *Config) Stress(databaseID string) error {
|
||||||
plog.Println("read generateReport is finished...")
|
plog.Println("read generateReport is finished...")
|
||||||
|
|
||||||
case "read-oneshot":
|
case "read-oneshot":
|
||||||
key, value := sameKey(gcfg.BenchmarkOptions.KeySizeBytes), vals.strings[0]
|
key, value := sameKey(gcfg.ConfigClientMachineBenchmarkOptions.KeySizeBytes), vals.strings[0]
|
||||||
plog.Infof("writing key for read-oneshot [key: %q | database: %q]", key, gcfg.DatabaseID)
|
plog.Infof("writing key for read-oneshot [key: %q | database: %q]", key, gcfg.DatabaseID)
|
||||||
var err error
|
var err error
|
||||||
switch gcfg.DatabaseID {
|
switch gcfg.DatabaseID {
|
||||||
|
|
@ -311,18 +311,18 @@ func (cfg *Config) Stress(databaseID string) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newReadHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
func newReadHandlers(gcfg dbtesterpb.ConfigClientMachineAgentControl) (rhs []ReqHandler, done func()) {
|
||||||
rhs = make([]ReqHandler, gcfg.BenchmarkOptions.ClientNumber)
|
rhs = make([]ReqHandler, gcfg.ConfigClientMachineBenchmarkOptions.ClientNumber)
|
||||||
switch gcfg.DatabaseID {
|
switch gcfg.DatabaseID {
|
||||||
case "etcdv2":
|
case "etcdv2":
|
||||||
conns := mustCreateClientsEtcdv2(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateClientsEtcdv2(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
for i := range conns {
|
for i := range conns {
|
||||||
rhs[i] = newGetEtcd2(conns[i])
|
rhs[i] = newGetEtcd2(conns[i])
|
||||||
}
|
}
|
||||||
case "etcdv3", "etcdtip":
|
case "etcdv3", "etcdtip":
|
||||||
clients := mustCreateClientsEtcdv3(gcfg.DatabaseEndpoints, etcdv3ClientCfg{
|
clients := mustCreateClientsEtcdv3(gcfg.DatabaseEndpoints, etcdv3ClientCfg{
|
||||||
totalConns: gcfg.BenchmarkOptions.ConnectionNumber,
|
totalConns: gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber,
|
||||||
totalClients: gcfg.BenchmarkOptions.ClientNumber,
|
totalClients: gcfg.ConfigClientMachineBenchmarkOptions.ClientNumber,
|
||||||
})
|
})
|
||||||
for i := range clients {
|
for i := range clients {
|
||||||
rhs[i] = newGetEtcd3(clients[i].KV)
|
rhs[i] = newGetEtcd3(clients[i].KV)
|
||||||
|
|
@ -333,7 +333,7 @@ func newReadHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case "zookeeper", "zetcd":
|
case "zookeeper", "zetcd":
|
||||||
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
for i := range conns {
|
for i := range conns {
|
||||||
rhs[i] = newGetZK(conns[i])
|
rhs[i] = newGetZK(conns[i])
|
||||||
}
|
}
|
||||||
|
|
@ -343,7 +343,7 @@ func newReadHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case "consul", "cetcd":
|
case "consul", "cetcd":
|
||||||
conns := mustCreateConnsConsul(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateConnsConsul(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
for i := range conns {
|
for i := range conns {
|
||||||
rhs[i] = newGetConsul(conns[i])
|
rhs[i] = newGetConsul(conns[i])
|
||||||
}
|
}
|
||||||
|
|
@ -351,18 +351,18 @@ func newReadHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
||||||
return rhs, done
|
return rhs, done
|
||||||
}
|
}
|
||||||
|
|
||||||
func newWriteHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
func newWriteHandlers(gcfg dbtesterpb.ConfigClientMachineAgentControl) (rhs []ReqHandler, done func()) {
|
||||||
rhs = make([]ReqHandler, gcfg.BenchmarkOptions.ClientNumber)
|
rhs = make([]ReqHandler, gcfg.ConfigClientMachineBenchmarkOptions.ClientNumber)
|
||||||
switch gcfg.DatabaseID {
|
switch gcfg.DatabaseID {
|
||||||
case "etcdv2":
|
case "etcdv2":
|
||||||
conns := mustCreateClientsEtcdv2(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateClientsEtcdv2(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
for i := range conns {
|
for i := range conns {
|
||||||
rhs[i] = newPutEtcd2(conns[i])
|
rhs[i] = newPutEtcd2(conns[i])
|
||||||
}
|
}
|
||||||
case "etcdv3", "etcdtip":
|
case "etcdv3", "etcdtip":
|
||||||
etcdClients := mustCreateClientsEtcdv3(gcfg.DatabaseEndpoints, etcdv3ClientCfg{
|
etcdClients := mustCreateClientsEtcdv3(gcfg.DatabaseEndpoints, etcdv3ClientCfg{
|
||||||
totalConns: gcfg.BenchmarkOptions.ConnectionNumber,
|
totalConns: gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber,
|
||||||
totalClients: gcfg.BenchmarkOptions.ClientNumber,
|
totalClients: gcfg.ConfigClientMachineBenchmarkOptions.ClientNumber,
|
||||||
})
|
})
|
||||||
for i := range etcdClients {
|
for i := range etcdClients {
|
||||||
rhs[i] = newPutEtcd3(etcdClients[i])
|
rhs[i] = newPutEtcd3(etcdClients[i])
|
||||||
|
|
@ -373,13 +373,13 @@ func newWriteHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case "zookeeper", "zetcd":
|
case "zookeeper", "zetcd":
|
||||||
if gcfg.BenchmarkOptions.SameKey {
|
if gcfg.ConfigClientMachineBenchmarkOptions.SameKey {
|
||||||
key := sameKey(gcfg.BenchmarkOptions.KeySizeBytes)
|
key := sameKey(gcfg.ConfigClientMachineBenchmarkOptions.KeySizeBytes)
|
||||||
valueBts := randBytes(gcfg.BenchmarkOptions.ValueSizeBytes)
|
valueBts := randBytes(gcfg.ConfigClientMachineBenchmarkOptions.ValueSizeBytes)
|
||||||
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, gcfg.DatabaseID)
|
plog.Infof("write started [request: PUT | key: %q | database: %q]", key, gcfg.DatabaseID)
|
||||||
var err error
|
var err error
|
||||||
for i := 0; i < 7; i++ {
|
for i := 0; i < 7; i++ {
|
||||||
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
_, err = conns[0].Create("/"+key, valueBts, zkCreateFlags, zkCreateACL)
|
_, err = conns[0].Create("/"+key, valueBts, zkCreateFlags, zkCreateACL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
|
|
@ -396,9 +396,9 @@ func newWriteHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
for i := range conns {
|
for i := range conns {
|
||||||
if gcfg.BenchmarkOptions.SameKey {
|
if gcfg.ConfigClientMachineBenchmarkOptions.SameKey {
|
||||||
rhs[i] = newPutOverwriteZK(conns[i])
|
rhs[i] = newPutOverwriteZK(conns[i])
|
||||||
} else {
|
} else {
|
||||||
rhs[i] = newPutCreateZK(conns[i])
|
rhs[i] = newPutCreateZK(conns[i])
|
||||||
|
|
@ -410,7 +410,7 @@ func newWriteHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case "consul", "cetcd":
|
case "consul", "cetcd":
|
||||||
conns := mustCreateConnsConsul(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateConnsConsul(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
for i := range conns {
|
for i := range conns {
|
||||||
rhs[i] = newPutConsul(conns[i])
|
rhs[i] = newPutConsul(conns[i])
|
||||||
}
|
}
|
||||||
|
|
@ -424,8 +424,8 @@ func newWriteHandlers(gcfg TestGroup) (rhs []ReqHandler, done func()) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func newReadOneshotHandlers(gcfg TestGroup) []ReqHandler {
|
func newReadOneshotHandlers(gcfg dbtesterpb.ConfigClientMachineAgentControl) []ReqHandler {
|
||||||
rhs := make([]ReqHandler, gcfg.BenchmarkOptions.ClientNumber)
|
rhs := make([]ReqHandler, gcfg.ConfigClientMachineBenchmarkOptions.ClientNumber)
|
||||||
switch gcfg.DatabaseID {
|
switch gcfg.DatabaseID {
|
||||||
case "etcdv2":
|
case "etcdv2":
|
||||||
for i := range rhs {
|
for i := range rhs {
|
||||||
|
|
@ -448,7 +448,7 @@ func newReadOneshotHandlers(gcfg TestGroup) []ReqHandler {
|
||||||
case "zookeeper", "zetcd":
|
case "zookeeper", "zetcd":
|
||||||
for i := range rhs {
|
for i := range rhs {
|
||||||
rhs[i] = func(ctx context.Context, req *request) error {
|
rhs[i] = func(ctx context.Context, req *request) error {
|
||||||
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.BenchmarkOptions.ConnectionNumber)
|
conns := mustCreateConnsZk(gcfg.DatabaseEndpoints, gcfg.ConfigClientMachineBenchmarkOptions.ConnectionNumber)
|
||||||
defer conns[0].Close()
|
defer conns[0].Close()
|
||||||
return newGetZK(conns[0])(ctx, req)
|
return newGetZK(conns[0])(ctx, req)
|
||||||
}
|
}
|
||||||
|
|
@ -464,18 +464,18 @@ func newReadOneshotHandlers(gcfg TestGroup) []ReqHandler {
|
||||||
return rhs
|
return rhs
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateReads(gcfg TestGroup, key string, inflightReqs chan<- request) {
|
func generateReads(gcfg dbtesterpb.ConfigClientMachineAgentControl, key string, inflightReqs chan<- request) {
|
||||||
defer close(inflightReqs)
|
defer close(inflightReqs)
|
||||||
|
|
||||||
var rateLimiter *rate.Limiter
|
var rateLimiter *rate.Limiter
|
||||||
if gcfg.BenchmarkOptions.RateLimitRequestsPerSecond > 0 {
|
if gcfg.ConfigClientMachineBenchmarkOptions.RateLimitRequestsPerSecond > 0 {
|
||||||
rateLimiter = rate.NewLimiter(
|
rateLimiter = rate.NewLimiter(
|
||||||
rate.Limit(gcfg.BenchmarkOptions.RateLimitRequestsPerSecond),
|
rate.Limit(gcfg.ConfigClientMachineBenchmarkOptions.RateLimitRequestsPerSecond),
|
||||||
int(gcfg.BenchmarkOptions.RateLimitRequestsPerSecond),
|
int(gcfg.ConfigClientMachineBenchmarkOptions.RateLimitRequestsPerSecond),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := int64(0); i < gcfg.BenchmarkOptions.RequestNumber; i++ {
|
for i := int64(0); i < gcfg.ConfigClientMachineBenchmarkOptions.RequestNumber; i++ {
|
||||||
if rateLimiter != nil {
|
if rateLimiter != nil {
|
||||||
rateLimiter.Wait(context.TODO())
|
rateLimiter.Wait(context.TODO())
|
||||||
}
|
}
|
||||||
|
|
@ -487,21 +487,21 @@ func generateReads(gcfg TestGroup, key string, inflightReqs chan<- request) {
|
||||||
|
|
||||||
case "etcdv3", "etcdtip":
|
case "etcdv3", "etcdtip":
|
||||||
opts := []clientv3.OpOption{clientv3.WithRange("")}
|
opts := []clientv3.OpOption{clientv3.WithRange("")}
|
||||||
if gcfg.BenchmarkOptions.StaleRead {
|
if gcfg.ConfigClientMachineBenchmarkOptions.StaleRead {
|
||||||
opts = append(opts, clientv3.WithSerializable())
|
opts = append(opts, clientv3.WithSerializable())
|
||||||
}
|
}
|
||||||
inflightReqs <- request{etcdv3Op: clientv3.OpGet(key, opts...)}
|
inflightReqs <- request{etcdv3Op: clientv3.OpGet(key, opts...)}
|
||||||
|
|
||||||
case "zookeeper", "zetcd":
|
case "zookeeper", "zetcd":
|
||||||
op := zkOp{key: key}
|
op := zkOp{key: key}
|
||||||
if gcfg.BenchmarkOptions.StaleRead {
|
if gcfg.ConfigClientMachineBenchmarkOptions.StaleRead {
|
||||||
op.staleRead = true
|
op.staleRead = true
|
||||||
}
|
}
|
||||||
inflightReqs <- request{zkOp: op}
|
inflightReqs <- request{zkOp: op}
|
||||||
|
|
||||||
case "consul", "cetcd":
|
case "consul", "cetcd":
|
||||||
op := consulOp{key: key}
|
op := consulOp{key: key}
|
||||||
if gcfg.BenchmarkOptions.StaleRead {
|
if gcfg.ConfigClientMachineBenchmarkOptions.StaleRead {
|
||||||
op.staleRead = true
|
op.staleRead = true
|
||||||
}
|
}
|
||||||
inflightReqs <- request{consulOp: op}
|
inflightReqs <- request{consulOp: op}
|
||||||
|
|
@ -509,12 +509,12 @@ func generateReads(gcfg TestGroup, key string, inflightReqs chan<- request) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateWrites(gcfg TestGroup, startIdx int64, vals values, inflightReqs chan<- request) {
|
func generateWrites(gcfg dbtesterpb.ConfigClientMachineAgentControl, startIdx int64, vals values, inflightReqs chan<- request) {
|
||||||
var rateLimiter *rate.Limiter
|
var rateLimiter *rate.Limiter
|
||||||
if gcfg.BenchmarkOptions.RateLimitRequestsPerSecond > 0 {
|
if gcfg.ConfigClientMachineBenchmarkOptions.RateLimitRequestsPerSecond > 0 {
|
||||||
rateLimiter = rate.NewLimiter(
|
rateLimiter = rate.NewLimiter(
|
||||||
rate.Limit(gcfg.BenchmarkOptions.RateLimitRequestsPerSecond),
|
rate.Limit(gcfg.ConfigClientMachineBenchmarkOptions.RateLimitRequestsPerSecond),
|
||||||
int(gcfg.BenchmarkOptions.RateLimitRequestsPerSecond),
|
int(gcfg.ConfigClientMachineBenchmarkOptions.RateLimitRequestsPerSecond),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -524,10 +524,10 @@ func generateWrites(gcfg TestGroup, startIdx int64, vals values, inflightReqs ch
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
for i := int64(0); i < gcfg.BenchmarkOptions.RequestNumber; i++ {
|
for i := int64(0); i < gcfg.ConfigClientMachineBenchmarkOptions.RequestNumber; i++ {
|
||||||
k := sequentialKey(gcfg.BenchmarkOptions.KeySizeBytes, i+startIdx)
|
k := sequentialKey(gcfg.ConfigClientMachineBenchmarkOptions.KeySizeBytes, i+startIdx)
|
||||||
if gcfg.BenchmarkOptions.SameKey {
|
if gcfg.ConfigClientMachineBenchmarkOptions.SameKey {
|
||||||
k = sameKey(gcfg.BenchmarkOptions.KeySizeBytes)
|
k = sameKey(gcfg.ConfigClientMachineBenchmarkOptions.KeySizeBytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
v := vals.bytes[i%int64(vals.sampleSize)]
|
v := vals.bytes[i%int64(vals.sampleSize)]
|
||||||
|
|
|
||||||
|
|
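For the variable-client-number path above, assignRequest (not shown in this diff) splits the total request count into one slot per configured connection/client stage, and each stage runs with its own copied benchmark options before the reports are combined. A hypothetical, self-contained sketch of that kind of split, written only to make the staging logic concrete; the real assignRequest in dbtester may differ:

package main

import "fmt"

// splitRequests is a hypothetical stand-in for assignRequest: it divides
// total requests evenly across the client-number stages and gives any
// remainder to the last stage so the slots sum back to the total.
func splitRequests(stages []int64, total int64) []int64 {
	rs := make([]int64, len(stages))
	if len(stages) == 0 {
		return rs
	}
	per := total / int64(len(stages))
	var assigned int64
	for i := range rs {
		rs[i] = per
		assigned += per
	}
	rs[len(rs)-1] += total - assigned
	return rs
}

func main() {
	// Illustrative stages between 1 and 1,000 clients and 1M total writes,
	// loosely matching the "clients 1 to 1,000" test described in the config;
	// the actual connection_client_numbers are not shown in this diff.
	stages := []int64{1, 10, 50, 100, 300, 500, 700, 1000}
	rs := splitRequests(stages, 1000000)
	for i, n := range rs {
		fmt.Printf("stage %d: %d clients, %d requests\n", i, stages[i], n)
	}
}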
@ -1,18 +1,19 @@
 test_title: Write 1M keys, 256-byte key, 1KB value value, clients 1 to 1,000
 test_description: |
   - Google Cloud Compute Engine
-  - 4 machines of 16 vCPUs + 30 GB Memory + 300 GB SSD (1 for client)
+  - 4 machines of 16 vCPUs + 60 GB Memory + 300 GB SSD (1 for client)
   - Ubuntu 16.10
-  - etcd v3.1 (Go 1.7.5)
-  - Zookeeper r3.4.9
+  - etcd tip (Go 1.8.0)
+  - Zookeeper r3.5.2-alpha
     - Java 8
     - javac 1.8.0_121
     - Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
     - Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
-  - Consul v0.7.4 (Go 1.7.5)
+    - `/usr/bin/java -Djute.maxbuffer=33554432 -Xms50G -Xmx50G`
+  - Consul v0.7.5 (Go 1.8.0)

 # common control options for all client machines
-control:
+config_client_machine_initial:
   # if not empty, all test data paths are prefixed
   path_prefix: /home/gyuho
   log_path: client-control.log
@ -30,21 +31,21 @@ control:
   # set this in 'control' machine, to automate log uploading in remote 'agent' machines
   google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
   google_cloud_storage_bucket_name: dbtester-results
-  google_cloud_storage_sub_directory: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable
+  google_cloud_storage_sub_directory: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable

-all_database_id_list: [etcdv3, zookeeper, consul]
+all_database_id_list: [etcd__tip, zookeeper__r3_5_2_alpha, consul__v0_7_5]

-datatbase_id_to_test_group:
-  etcdv3:
-    database_description: etcd v3.1 (Go 1.7.5)
+datatbase_id_to_config_client_machine_agent_control:
+  etcd__tip:
+    database_description: etcd tip (Go 1.8.0)
     peer_ips:
-    - 10.240.0.20
-    - 10.240.0.21
-    - 10.240.0.22
+    - 10.240.0.7
+    - 10.240.0.8
+    - 10.240.0.12
     database_port_to_connect: 2379
     agent_port_to_connect: 3500

-    etcdv3:
+    etcd__tip:
       # --snapshot-count
       snap_count: 100000
       # --quota-backend-bytes; 8 GB
@ -74,17 +75,25 @@ datatbase_id_to_test_group:
     step3_stop_database: true
     step4_upload_logs: true

-  zookeeper:
-    database_description: Zookeeper r3.4.9 (Java 8)
+  zookeeper__r3_5_2_alpha:
+    database_description: Zookeeper r3.5.2-alpha (Java 8)
     peer_ips:
-    - 10.240.0.25
-    - 10.240.0.27
-    - 10.240.0.28
+    - 10.240.0.21
+    - 10.240.0.22
+    - 10.240.0.23
     database_port_to_connect: 2181
     agent_port_to_connect: 3500

     # http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
-    zookeeper:
+    zookeeper__r3_5_2_alpha:
+      # maximum size, in bytes, of a request or response
+      # set it to 33 MB
+      java_d_jute_max_buffer: 33554432
+
+      # JVM min,max heap size
+      java_xms: 50G
+      java_xmx: 50G
+
       # tickTime; the length of a single tick, which is the basic time unit used by ZooKeeper,
       # as measured in milliseconds.
       tick_time: 2000
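The three new java_* keys are what produce the JVM invocation quoted in the test description (`/usr/bin/java -Djute.maxbuffer=33554432 -Xms50G -Xmx50G`). A small sketch of that mapping, assuming hypothetical field names on the decoded flag struct rather than the real generated ones:

package main

import "fmt"

// zkJavaFlags mirrors the java_* keys above; the field names are
// illustrative, not the generated dbtesterpb names.
type zkJavaFlags struct {
	JavaDJuteMaxBuffer int64  // java_d_jute_max_buffer
	JavaXms            string // java_xms
	JavaXmx            string // java_xmx
}

func main() {
	f := zkJavaFlags{JavaDJuteMaxBuffer: 33554432, JavaXms: "50G", JavaXmx: "50G"}
	args := fmt.Sprintf("/usr/bin/java -Djute.maxbuffer=%d -Xms%s -Xmx%s",
		f.JavaDJuteMaxBuffer, f.JavaXms, f.JavaXmx)
	fmt.Println(args)
	// Output: /usr/bin/java -Djute.maxbuffer=33554432 -Xms50G -Xmx50G
}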
@ -130,12 +139,12 @@ datatbase_id_to_test_group:
     step3_stop_database: true
     step4_upload_logs: true

-  consul:
-    database_description: Consul v0.7.4 (Go 1.7.5)
+  consul__v0_7_5:
+    database_description: Consul v0.7.5 (Go 1.8.0)
     peer_ips:
-    - 10.240.0.30
-    - 10.240.0.31
-    - 10.240.0.33
+    - 10.240.0.27
+    - 10.240.0.28
+    - 10.240.0.29
     database_port_to_connect: 8500
     agent_port_to_connect: 3500

@ -164,10 +173,10 @@ datatbase_id_to_test_group:
     step4_upload_logs: true


-datatbase_id_to_test_data:
-  etcdv3:
+datatbase_id_to_config_analyze_machine_initial:
+  etcd__tip:
     # if not empty, all test data paths are prefixed
-    path_prefix: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.5
+    path_prefix: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-tip-go1.8.0
     client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
     client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
     client_latency_distribution_all_path: client-latency-distribution-all.csv
@ -184,9 +193,9 @@ datatbase_id_to_test_data:
     - 3-server-system-metrics-interpolated.csv
     all_aggregated_output_path: all-aggregated.csv

-  zookeeper:
+  zookeeper__r3_5_2_alpha:
     # if not empty, all test data paths are prefixed
-    path_prefix: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8
+    path_prefix: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.5.2-alpha-java8
     client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
     client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
     client_latency_distribution_all_path: client-latency-distribution-all.csv
@ -203,9 +212,9 @@ datatbase_id_to_test_data:
     - 3-server-system-metrics-interpolated.csv
     all_aggregated_output_path: all-aggregated.csv

-  consul:
+  consul__v0_7_5:
     # if not empty, all test data paths are prefixed
-    path_prefix: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.4-go1.7.5
+    path_prefix: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.5-go1.8.0
     client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
     client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
     client_latency_distribution_all_path: client-latency-distribution-all.csv
@ -222,12 +231,12 @@ datatbase_id_to_test_data:
     - 3-server-system-metrics-interpolated.csv
     all_aggregated_output_path: all-aggregated.csv

-analyze:
-  all_aggregated_output_path_csv: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.csv
-  all_aggregated_output_path_txt: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.txt
+analyze_all_aggregated_output:
+  all_aggregated_output_path_csv: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.csv
+  all_aggregated_output_path_txt: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/all-aggregated.txt

-plot_path_prefix: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable
-plot_list:
+analyze_plot_path_prefix: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable
+analyze_plot_list:
 - column: AVG-LATENCY-MS
   x_axis: Second
   y_axis: Latency(millisecond)
@ -288,82 +297,82 @@ plot_list:
   x_axis: Second
   y_axis: Network Transmit(bytes) (Delta per Second)

-readme:
-  output_path: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/README.md
+analyze_readme:
+  output_path: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/README.md

   images:
-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-THROUGHPUT.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VOLUNTARY-CTXT-SWITCHES.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-CPU.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/MAX-CPU.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.svg
+    path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READS-COMPLETED-DELTA.svg
     type: remote

-  - title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA
+  - title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA
-    path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-READ-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITES-COMPLETED-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-SECTORS-WRITTEN-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-READ-BYTES-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-WRITE-BYTES-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-RECEIVE-BYTES-NUM-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys-client-variable/AVG-TRANSMIT-BYTES-NUM-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
|
||||||
|
|
@@ -1,18 +1,19 @@
|
||||||
test_title: Write 1M keys, 256-byte key, 1KB value, Best Throughput (etcd 1,000, Zookeeper 500, Consul 500 clients)
|
test_title: Write 1M keys, 256-byte key, 1KB value, Best Throughput (etcd 1,000, Zookeeper 500, Consul 500 clients)
|
||||||
test_description: |
|
test_description: |
|
||||||
- Google Cloud Compute Engine
|
- Google Cloud Compute Engine
|
||||||
- 4 machines of 16 vCPUs + 30 GB Memory + 300 GB SSD (1 for client)
|
- 4 machines of 16 vCPUs + 60 GB Memory + 300 GB SSD (1 for client)
|
||||||
- Ubuntu 16.10
|
- Ubuntu 16.10
|
||||||
- etcd v3.1 (Go 1.7.5)
|
- etcd tip (Go 1.8.0)
|
||||||
- Zookeeper r3.4.9
|
- Zookeeper r3.5.2-alpha
|
||||||
- Java 8
|
- Java 8
|
||||||
- javac 1.8.0_121
|
- javac 1.8.0_121
|
||||||
- Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
|
- Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
|
||||||
- Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
|
- Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
|
||||||
- Consul v0.7.4 (Go 1.7.5)
|
- `/usr/bin/java -Djute.maxbuffer=33554432 -Xms50G -Xmx50G`
|
||||||
|
- Consul v0.7.5 (Go 1.8.0)
|
||||||
|
|
||||||
# common control options for all client machines
|
# common control options for all client machines
|
||||||
control:
|
config_client_machine_initial:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: /home/gyuho
|
path_prefix: /home/gyuho
|
||||||
log_path: client-control.log
|
log_path: client-control.log
|
||||||
|
|
@@ -30,21 +31,21 @@ control:
|
||||||
# set this in 'control' machine, to automate log uploading in remote 'agent' machines
|
# set this in 'control' machine, to automate log uploading in remote 'agent' machines
|
||||||
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
|
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
|
||||||
google_cloud_storage_bucket_name: dbtester-results
|
google_cloud_storage_bucket_name: dbtester-results
|
||||||
google_cloud_storage_sub_directory: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput
|
google_cloud_storage_sub_directory: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput
|
||||||
|
|
||||||
all_database_id_list: [etcdv3, zookeeper, consul]
|
all_database_id_list: [etcd__tip, zookeeper__r3_5_2_alpha, consul__v0_7_5]
|
||||||
|
|
||||||
datatbase_id_to_test_group:
|
datatbase_id_to_config_client_machine_agent_control:
|
||||||
etcdv3:
|
etcd__tip:
|
||||||
database_description: etcd v3.1 (Go 1.7.5)
|
database_description: etcd tip (Go 1.8.0)
|
||||||
peer_ips:
|
peer_ips:
|
||||||
- 10.240.0.20
|
- 10.240.0.7
|
||||||
- 10.240.0.21
|
- 10.240.0.8
|
||||||
- 10.240.0.22
|
- 10.240.0.12
|
||||||
database_port_to_connect: 2379
|
database_port_to_connect: 2379
|
||||||
agent_port_to_connect: 3500
|
agent_port_to_connect: 3500
|
||||||
|
|
||||||
etcdv3:
|
etcd__tip:
|
||||||
# --snapshot-count
|
# --snapshot-count
|
||||||
snap_count: 100000
|
snap_count: 100000
|
||||||
# --quota-backend-bytes; 8 GB
|
# --quota-backend-bytes; 8 GB
|
||||||
|
|
@@ -74,17 +75,25 @@ datatbase_id_to_test_group:
|
||||||
step3_stop_database: true
|
step3_stop_database: true
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
database_description: Zookeeper r3.4.9 (Java 8)
|
database_description: Zookeeper r3.5.2-alpha (Java 8)
|
||||||
peer_ips:
|
peer_ips:
|
||||||
- 10.240.0.25
|
- 10.240.0.21
|
||||||
- 10.240.0.27
|
- 10.240.0.22
|
||||||
- 10.240.0.28
|
- 10.240.0.23
|
||||||
database_port_to_connect: 2181
|
database_port_to_connect: 2181
|
||||||
agent_port_to_connect: 3500
|
agent_port_to_connect: 3500
|
||||||
|
|
||||||
# http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
|
# http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
|
# maximum size, in bytes, of a request or response
|
||||||
|
# set it to 33 MB
|
||||||
|
java_d_jute_max_buffer: 33554432
|
||||||
|
|
||||||
|
# JVM min,max heap size
|
||||||
|
java_xms: 50G
|
||||||
|
java_xmx: 50G
|
||||||
|
|
||||||
# tickTime; the length of a single tick, which is the basic time unit used by ZooKeeper,
|
# tickTime; the length of a single tick, which is the basic time unit used by ZooKeeper,
|
||||||
# as measured in milliseconds.
|
# as measured in milliseconds.
|
||||||
tick_time: 2000
|
tick_time: 2000
|
||||||
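The three new ZooKeeper keys above (java_d_jute_max_buffer, java_xms, java_xmx) map one-to-one onto the JVM invocation quoted in the test description. A small Go sketch of that mapping, using only the values shown in this config; the flag assembly is illustrative and is not taken from dbtester's agent code:

package main

import "fmt"

func main() {
	// Values copied from the config above.
	juteMaxBuffer := int64(33554432) // java_d_jute_max_buffer, ~33 MB
	xms, xmx := "50G", "50G"         // java_xms, java_xmx

	// Assembles the same flags as the quoted command:
	// /usr/bin/java -Djute.maxbuffer=33554432 -Xms50G -Xmx50G
	args := []string{
		fmt.Sprintf("-Djute.maxbuffer=%d", juteMaxBuffer),
		"-Xms" + xms,
		"-Xmx" + xmx,
	}
	fmt.Println("/usr/bin/java", args)
}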
|
|
@@ -130,12 +139,12 @@ datatbase_id_to_test_group:
|
||||||
step3_stop_database: true
|
step3_stop_database: true
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
consul:
|
consul__v0_7_5:
|
||||||
database_description: Consul v0.7.4 (Go 1.7.5)
|
database_description: Consul v0.7.5 (Go 1.8.0)
|
||||||
peer_ips:
|
peer_ips:
|
||||||
- 10.240.0.30
|
- 10.240.0.27
|
||||||
- 10.240.0.31
|
- 10.240.0.28
|
||||||
- 10.240.0.33
|
- 10.240.0.29
|
||||||
database_port_to_connect: 8500
|
database_port_to_connect: 8500
|
||||||
agent_port_to_connect: 3500
|
agent_port_to_connect: 3500
|
||||||
|
|
||||||
|
|
@@ -164,10 +173,10 @@ datatbase_id_to_test_group:
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
|
|
||||||
datatbase_id_to_test_data:
|
datatbase_id_to_config_analyze_machine_initial:
|
||||||
etcdv3:
|
etcd__tip:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.5
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-tip-go1.8.0
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@@ -182,9 +191,9 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.5.2-alpha-java8
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@@ -199,9 +208,9 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
consul:
|
consul__v0_7_5:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.4-go1.7.5
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.5-go1.8.0
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@@ -216,12 +225,12 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
analyze:
|
analyze_all_aggregated_output:
|
||||||
all_aggregated_output_path_csv: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/all-aggregated.csv
|
all_aggregated_output_path_csv: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/all-aggregated.csv
|
||||||
all_aggregated_output_path_txt: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/all-aggregated.txt
|
all_aggregated_output_path_txt: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/all-aggregated.txt
|
||||||
|
|
||||||
plot_path_prefix: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput
|
analyze_plot_path_prefix: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput
|
||||||
plot_list:
|
analyze_plot_list:
|
||||||
- column: AVG-LATENCY-MS
|
- column: AVG-LATENCY-MS
|
||||||
x_axis: Second
|
x_axis: Second
|
||||||
y_axis: Latency(millisecond)
|
y_axis: Latency(millisecond)
|
||||||
|
|
@@ -282,82 +291,82 @@ plot_list:
|
||||||
x_axis: Second
|
x_axis: Second
|
||||||
y_axis: Network Transmit(bytes) (Delta per Second)
|
y_axis: Network Transmit(bytes) (Delta per Second)
|
||||||
|
|
||||||
readme:
|
analyze_readme:
|
||||||
output_path: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/README.md
|
output_path: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/README.md
|
||||||
|
|
||||||
images:
|
images:
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS-BY-KEY
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS-BY-KEY
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS-BY-KEY.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS-BY-KEY.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-THROUGHPUT
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-THROUGHPUT
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-THROUGHPUT.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-THROUGHPUT.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VOLUNTARY-CTXT-SWITCHES
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VOLUNTARY-CTXT-SWITCHES
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VOLUNTARY-CTXT-SWITCHES.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VOLUNTARY-CTXT-SWITCHES.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-NON-VOLUNTARY-CTXT-SWITCHES
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-NON-VOLUNTARY-CTXT-SWITCHES
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-CPU
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-CPU
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-CPU.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-CPU.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/MAX-CPU
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/MAX-CPU
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/MAX-CPU.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/MAX-CPU.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB-BY-KEY
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB-BY-KEY
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB-BY-KEY.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB-BY-KEY.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-READS-COMPLETED-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-READS-COMPLETED-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-READS-COMPLETED-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-READS-COMPLETED-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-SECTORS-READ-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-SECTORS-READ-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-SECTORS-READ-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-SECTORS-READ-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-WRITES-COMPLETED-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-WRITES-COMPLETED-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-WRITES-COMPLETED-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-WRITES-COMPLETED-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-SECTORS-WRITTEN-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-SECTORS-WRITTEN-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-SECTORS-WRITTEN-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-SECTORS-WRITTEN-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-READ-BYTES-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-READ-BYTES-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-READ-BYTES-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-READ-BYTES-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-WRITE-BYTES-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-WRITE-BYTES-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-WRITE-BYTES-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-WRITE-BYTES-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-RECEIVE-BYTES-NUM-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-RECEIVE-BYTES-NUM-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-RECEIVE-BYTES-NUM-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-RECEIVE-BYTES-NUM-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-TRANSMIT-BYTES-NUM-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-TRANSMIT-BYTES-NUM-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-TRANSMIT-BYTES-NUM-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/AVG-TRANSMIT-BYTES-NUM-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
|
||||||
|
|
@@ -1,18 +1,19 @@
|
||||||
test_title: Write 3-million keys, 256-byte key, 1KB value, Best Throughput (etcd 1,000, Zookeeper 500, Consul 500 clients)
|
test_title: Write 3-million keys, 256-byte key, 1KB value, Best Throughput (etcd 1,000, Zookeeper 500, Consul 500 clients)
|
||||||
test_description: |
|
test_description: |
|
||||||
- Google Cloud Compute Engine
|
- Google Cloud Compute Engine
|
||||||
- 4 machines of 16 vCPUs + 30 GB Memory + 300 GB SSD (1 for client)
|
- 4 machines of 16 vCPUs + 60 GB Memory + 300 GB SSD (1 for client)
|
||||||
- Ubuntu 16.10
|
- Ubuntu 16.10
|
||||||
- etcd v3.1 (Go 1.7.5)
|
- etcd tip (Go 1.8.0)
|
||||||
- Zookeeper r3.4.9
|
- Zookeeper r3.5.2-alpha
|
||||||
- Java 8
|
- Java 8
|
||||||
- javac 1.8.0_121
|
- javac 1.8.0_121
|
||||||
- Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
|
- Java(TM) SE Runtime Environment (build 1.8.0_121-b13)
|
||||||
- Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
|
- Java HotSpot(TM) 64-Bit Server VM (build 25.121-b13, mixed mode)
|
||||||
- Consul v0.7.4 (Go 1.7.5)
|
- `/usr/bin/java -Djute.maxbuffer=33554432 -Xms50G -Xmx50G`
|
||||||
|
- Consul v0.7.5 (Go 1.8.0)
|
||||||
|
|
||||||
# common control options for all client machines
|
# common control options for all client machines
|
||||||
control:
|
config_client_machine_initial:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: /home/gyuho
|
path_prefix: /home/gyuho
|
||||||
log_path: client-control.log
|
log_path: client-control.log
|
||||||
|
|
@@ -30,21 +31,21 @@ control:
|
||||||
# set this in 'control' machine, to automate log uploading in remote 'agent' machines
|
# set this in 'control' machine, to automate log uploading in remote 'agent' machines
|
||||||
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
|
google_cloud_storage_key_path: /home/gyuho/gcloud-key.json
|
||||||
google_cloud_storage_bucket_name: dbtester-results
|
google_cloud_storage_bucket_name: dbtester-results
|
||||||
google_cloud_storage_sub_directory: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys
|
google_cloud_storage_sub_directory: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys
|
||||||
|
|
||||||
all_database_id_list: [etcdv3, zookeeper, consul]
|
all_database_id_list: [etcd__tip, zookeeper__r3_5_2_alpha, consul__v0_7_5]
|
||||||
|
|
||||||
datatbase_id_to_test_group:
|
datatbase_id_to_config_client_machine_agent_control:
|
||||||
etcdv3:
|
etcd__tip:
|
||||||
database_description: etcd v3.1 (Go 1.7.5)
|
database_description: etcd tip (Go 1.8.0)
|
||||||
peer_ips:
|
peer_ips:
|
||||||
- 10.240.0.20
|
- 10.240.0.7
|
||||||
- 10.240.0.21
|
- 10.240.0.8
|
||||||
- 10.240.0.22
|
- 10.240.0.12
|
||||||
database_port_to_connect: 2379
|
database_port_to_connect: 2379
|
||||||
agent_port_to_connect: 3500
|
agent_port_to_connect: 3500
|
||||||
|
|
||||||
etcdv3:
|
etcd__tip:
|
||||||
# --snapshot-count
|
# --snapshot-count
|
||||||
snap_count: 100000
|
snap_count: 100000
|
||||||
# --quota-backend-bytes; 8 GB
|
# --quota-backend-bytes; 8 GB
|
||||||
|
|
@@ -74,17 +75,25 @@ datatbase_id_to_test_group:
|
||||||
step3_stop_database: true
|
step3_stop_database: true
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
database_description: Zookeeper r3.4.9 (Java 8)
|
database_description: Zookeeper r3.5.2-alpha (Java 8)
|
||||||
peer_ips:
|
peer_ips:
|
||||||
- 10.240.0.25
|
- 10.240.0.21
|
||||||
- 10.240.0.27
|
- 10.240.0.22
|
||||||
- 10.240.0.28
|
- 10.240.0.23
|
||||||
database_port_to_connect: 2181
|
database_port_to_connect: 2181
|
||||||
agent_port_to_connect: 3500
|
agent_port_to_connect: 3500
|
||||||
|
|
||||||
# http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
|
# http://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
|
# maximum size, in bytes, of a request or response
|
||||||
|
# set it to 33 MB
|
||||||
|
java_d_jute_max_buffer: 33554432
|
||||||
|
|
||||||
|
# JVM min,max heap size
|
||||||
|
java_xms: 50G
|
||||||
|
java_xmx: 50G
|
||||||
|
|
||||||
# tickTime; the length of a single tick, which is the basic time unit used by ZooKeeper,
|
# tickTime; the length of a single tick, which is the basic time unit used by ZooKeeper,
|
||||||
# as measured in milliseconds.
|
# as measured in milliseconds.
|
||||||
tick_time: 2000
|
tick_time: 2000
|
||||||
|
|
@@ -130,12 +139,12 @@ datatbase_id_to_test_group:
|
||||||
step3_stop_database: true
|
step3_stop_database: true
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
consul:
|
consul__v0_7_5:
|
||||||
database_description: Consul v0.7.4 (Go 1.7.5)
|
database_description: Consul v0.7.5 (Go 1.8.0)
|
||||||
peer_ips:
|
peer_ips:
|
||||||
- 10.240.0.30
|
- 10.240.0.27
|
||||||
- 10.240.0.31
|
- 10.240.0.28
|
||||||
- 10.240.0.33
|
- 10.240.0.29
|
||||||
database_port_to_connect: 8500
|
database_port_to_connect: 8500
|
||||||
agent_port_to_connect: 3500
|
agent_port_to_connect: 3500
|
||||||
|
|
||||||
|
|
@@ -164,10 +173,10 @@ datatbase_id_to_test_group:
|
||||||
step4_upload_logs: true
|
step4_upload_logs: true
|
||||||
|
|
||||||
|
|
||||||
datatbase_id_to_test_data:
|
datatbase_id_to_config_analyze_machine_initial:
|
||||||
etcdv3:
|
etcd__tip:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/etcd-v3.1-go1.7.5
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/etcd-tip-go1.8.0
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@@ -182,9 +191,9 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
zookeeper:
|
zookeeper__r3_5_2_alpha:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/zookeeper-r3.4.9-java8
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/zookeeper-r3.5.2-alpha-java8
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@@ -199,9 +208,9 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
consul:
|
consul__v0_7_5:
|
||||||
# if not empty, all test data paths are prefixed
|
# if not empty, all test data paths are prefixed
|
||||||
path_prefix: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/consul-v0.7.4-go1.7.5
|
path_prefix: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/consul-v0.7.5-go1.8
|
||||||
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
client_system_metrics_interpolated_path: client-system-metrics-interpolated.csv
|
||||||
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
client_latency_throughput_timeseries_path: client-latency-throughput-timeseries.csv
|
||||||
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
client_latency_distribution_all_path: client-latency-distribution-all.csv
|
||||||
|
|
@@ -216,12 +225,12 @@ datatbase_id_to_test_data:
|
||||||
- 3-server-system-metrics-interpolated.csv
|
- 3-server-system-metrics-interpolated.csv
|
||||||
all_aggregated_output_path: all-aggregated.csv
|
all_aggregated_output_path: all-aggregated.csv
|
||||||
|
|
||||||
analyze:
|
analyze_all_aggregated_output:
|
||||||
all_aggregated_output_path_csv: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/all-aggregated.csv
|
all_aggregated_output_path_csv: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/all-aggregated.csv
|
||||||
all_aggregated_output_path_txt: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/all-aggregated.txt
|
all_aggregated_output_path_txt: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/all-aggregated.txt
|
||||||
|
|
||||||
plot_path_prefix: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys
|
analyze_plot_path_prefix: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys
|
||||||
plot_list:
|
analyze_plot_list:
|
||||||
- column: AVG-LATENCY-MS
|
- column: AVG-LATENCY-MS
|
||||||
x_axis: Second
|
x_axis: Second
|
||||||
y_axis: Latency(millisecond)
|
y_axis: Latency(millisecond)
|
||||||
|
|
@@ -282,82 +291,82 @@ plot_list:
|
||||||
x_axis: Second
|
x_axis: Second
|
||||||
y_axis: Network Transmit(bytes) (Delta per Second)
|
y_axis: Network Transmit(bytes) (Delta per Second)
|
||||||
|
|
||||||
readme:
|
analyze_readme:
|
||||||
output_path: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/README.md
|
output_path: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/README.md
|
||||||
|
|
||||||
images:
|
images:
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS-BY-KEY
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS-BY-KEY
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS-BY-KEY.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS-BY-KEY.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-LATENCY-MS-BY-KEY-ERROR-POINTS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-THROUGHPUT
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-THROUGHPUT
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-THROUGHPUT.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-THROUGHPUT.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VOLUNTARY-CTXT-SWITCHES
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VOLUNTARY-CTXT-SWITCHES
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VOLUNTARY-CTXT-SWITCHES.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VOLUNTARY-CTXT-SWITCHES.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-NON-VOLUNTARY-CTXT-SWITCHES
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-NON-VOLUNTARY-CTXT-SWITCHES
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-NON-VOLUNTARY-CTXT-SWITCHES.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-CPU
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-CPU
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-CPU.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-CPU.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/MAX-CPU
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/MAX-CPU
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/MAX-CPU.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/MAX-CPU.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB-BY-KEY
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB-BY-KEY
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB-BY-KEY.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB-BY-KEY.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-VMRSS-MB-BY-KEY-ERROR-POINTS.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-READS-COMPLETED-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-READS-COMPLETED-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-READS-COMPLETED-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-READS-COMPLETED-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-SECTORS-READ-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-SECTORS-READ-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-SECTORS-READ-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-SECTORS-READ-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-WRITES-COMPLETED-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-WRITES-COMPLETED-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-WRITES-COMPLETED-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-WRITES-COMPLETED-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-SECTORS-WRITTEN-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-SECTORS-WRITTEN-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-SECTORS-WRITTEN-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-SECTORS-WRITTEN-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-READ-BYTES-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-READ-BYTES-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-READ-BYTES-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-READ-BYTES-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-WRITE-BYTES-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-WRITE-BYTES-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-WRITE-BYTES-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-WRITE-BYTES-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-RECEIVE-BYTES-NUM-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-RECEIVE-BYTES-NUM-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-RECEIVE-BYTES-NUM-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-RECEIVE-BYTES-NUM-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
||||||
- title: 2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-TRANSMIT-BYTES-NUM-DELTA
|
- title: 2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-TRANSMIT-BYTES-NUM-DELTA
|
||||||
path: https://storage.googleapis.com/dbtester-results/2017Q1-00-etcd-zookeeper-consul/03-write-too-many-keys/AVG-TRANSMIT-BYTES-NUM-DELTA.svg
|
path: https://storage.googleapis.com/dbtester-results/2017Q1-01-etcd-zookeeper-consul/03-write-too-many-keys/AVG-TRANSMIT-BYTES-NUM-DELTA.svg
|
||||||
type: remote
|
type: remote
|
||||||
|
|
|
||||||
|
|
@@ -2,7 +2,7 @@
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
rm -f /tmp/consul.zip
|
rm -f /tmp/consul.zip
|
||||||
curl -sf -o /tmp/consul.zip https://releases.hashicorp.com/consul/0.7.4/consul_0.7.4_linux_amd64.zip
|
curl -sf -o /tmp/consul.zip https://releases.hashicorp.com/consul/0.7.5/consul_0.7.5_linux_amd64.zip
|
||||||
|
|
||||||
rm -f ${GOPATH}/bin/consul
|
rm -f ${GOPATH}/bin/consul
|
||||||
unzip /tmp/consul.zip -d ${GOPATH}/bin
|
unzip /tmp/consul.zip -d ${GOPATH}/bin
|
||||||
|
|
|
||||||
|
|
@@ -33,7 +33,7 @@ cp ${GOPATH}/src/${GIT_PATH}/bin/etcdctl ${GOPATH}/bin/etcdctl
|
||||||
sudo cp ${GOPATH}/src/${GIT_PATH}/bin/etcdctl /etcdctl
|
sudo cp ${GOPATH}/src/${GIT_PATH}/bin/etcdctl /etcdctl
|
||||||
COMMENT
|
COMMENT
|
||||||
|
|
||||||
ETCD_VER=v3.1.0
|
ETCD_VER=v3.1.1
|
||||||
|
|
||||||
GOOGLE_URL=https://storage.googleapis.com/etcd
|
GOOGLE_URL=https://storage.googleapis.com/etcd
|
||||||
GITHUB_URL=https://github.com/coreos/etcd/releases/download
|
GITHUB_URL=https://github.com/coreos/etcd/releases/download
|
||||||
|
|
|
||||||
|
|
@@ -59,12 +59,24 @@ ansible-playbook /tmp/install-java.yml
|
||||||
java -version
|
java -version
|
||||||
javac -version
|
javac -version
|
||||||
|
|
||||||
echo "Installing Zookeeper..."
|
<<COMMENT
|
||||||
ZOOKEEPER_VERSION=3.4.9
|
ZK_VERSION=3.4.9
|
||||||
sudo rm -rf $HOME/zookeeper
|
sudo rm -rf $HOME/zookeeper
|
||||||
sudo curl -sf -o /tmp/zookeeper-$ZOOKEEPER_VERSION.tar.gz -L https://www.apache.org/dist/zookeeper/zookeeper-$ZOOKEEPER_VERSION/zookeeper-$ZOOKEEPER_VERSION.tar.gz
|
sudo curl -sf -o /tmp/zookeeper-$ZK_VERSION.tar.gz -L https://www.apache.org/dist/zookeeper/zookeeper-$ZK_VERSION/zookeeper-$ZK_VERSION.tar.gz
|
||||||
sudo tar -xzf /tmp/zookeeper-$ZOOKEEPER_VERSION.tar.gz -C /tmp/
|
sudo tar -xzf /tmp/zookeeper-$ZK_VERSION.tar.gz -C /tmp/
|
||||||
sudo mv /tmp/zookeeper-$ZOOKEEPER_VERSION /tmp/zookeeper
|
sudo mv /tmp/zookeeper-$ZK_VERSION /tmp/zookeeper
|
||||||
|
sudo mv /tmp/zookeeper $HOME/
|
||||||
|
sudo chmod -R 777 $HOME/zookeeper/
|
||||||
|
mkdir -p $HOME/zookeeper/zookeeper.data
|
||||||
|
touch $HOME/zookeeper/zookeeper.data/myid
|
||||||
|
sudo chmod -R 777 $HOME/zookeeper/zookeeper.data/
|
||||||
|
COMMENT
|
||||||
|
|
||||||
|
ZK_VERSION=3.5.2-alpha
|
||||||
|
sudo rm -rf $HOME/zookeeper
|
||||||
|
sudo curl -sf -o /tmp/zookeeper-$ZK_VERSION.tar.gz -L https://www.apache.org/dist/zookeeper/zookeeper-$ZK_VERSION/zookeeper-$ZK_VERSION.tar.gz
|
||||||
|
sudo tar -xzf /tmp/zookeeper-$ZK_VERSION.tar.gz -C /tmp/
|
||||||
|
sudo mv /tmp/zookeeper-$ZK_VERSION /tmp/zookeeper
|
||||||
sudo mv /tmp/zookeeper $HOME/
|
sudo mv /tmp/zookeeper $HOME/
|
||||||
sudo chmod -R 777 $HOME/zookeeper/
|
sudo chmod -R 777 $HOME/zookeeper/
|
||||||
mkdir -p $HOME/zookeeper/zookeeper.data
|
mkdir -p $HOME/zookeeper/zookeeper.data
|
||||||
|
|
|
||||||
|
|
@@ -7,9 +7,9 @@ nohup dbtester agent --network-interface ens4 --disk-device sda --agent-port :
|
||||||
|
|
||||||
# control; specify 'control' configuration file (client number, key number, key-value size),
|
# control; specify 'control' configuration file (client number, key number, key-value size),
|
||||||
# this starts database stressing, and shuts down the database when done
|
# this starts database stressing, and shuts down the database when done
|
||||||
nohup dbtester control --database-id etcdv3 --config config.yaml > $HOME/control.log 2>&1 &
|
nohup dbtester control --database-id etcd__tip --config config.yaml > $HOME/control.log 2>&1 &
|
||||||
nohup dbtester control --database-id zookeeper --config config.yaml > $HOME/control.log 2>&1 &
|
nohup dbtester control --database-id zookeeper__r3_5_2_alpha --config config.yaml > $HOME/control.log 2>&1 &
|
||||||
nohup dbtester control --database-id consul --config config.yaml > $HOME/control.log 2>&1 &
|
nohup dbtester control --database-id consul__v0_7_5 --config config.yaml > $HOME/control.log 2>&1 &
|
||||||
|
|
||||||
# analyze; get all data from remote machines
|
# analyze; get all data from remote machines
|
||||||
# and specify 'analyze' configuration file,
|
# and specify 'analyze' configuration file,
|
||||||
|
|
|
||||||
|
|
@@ -20,6 +20,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
@@ -35,6 +36,7 @@ import (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
|
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
|
||||||
|
ErrOldCluster = errors.New("etcdclient: old cluster version")
|
||||||
)
|
)
|
||||||
|
|
||||||
// Client provides and manages an etcd v3 client session.
|
// Client provides and manages an etcd v3 client session.
|
||||||
|
|
@@ -272,7 +274,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
|
||||||
tokenMu: &sync.RWMutex{},
|
tokenMu: &sync.RWMutex{},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := c.getToken(context.TODO())
|
err := c.getToken(c.ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
@@ -307,7 +309,12 @@ func newClient(cfg *Config) (*Client, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// use a temporary skeleton client to bootstrap first connection
|
// use a temporary skeleton client to bootstrap first connection
|
||||||
ctx, cancel := context.WithCancel(context.TODO())
|
baseCtx := context.TODO()
|
||||||
|
if cfg.Context != nil {
|
||||||
|
baseCtx = cfg.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(baseCtx)
|
||||||
client := &Client{
|
client := &Client{
|
||||||
conn: nil,
|
conn: nil,
|
||||||
cfg: *cfg,
|
cfg: *cfg,
|
||||||
|
|
@@ -353,10 +360,57 @@ func newClient(cfg *Config) (*Client, error) {
|
||||||
client.Auth = NewAuth(client)
|
client.Auth = NewAuth(client)
|
||||||
client.Maintenance = NewMaintenance(client)
|
client.Maintenance = NewMaintenance(client)
|
||||||
|
|
||||||
|
if cfg.RejectOldCluster {
|
||||||
|
if err := client.checkVersion(); err != nil {
|
||||||
|
client.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
go client.autoSync()
|
go client.autoSync()
|
||||||
return client, nil
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Client) checkVersion() (err error) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
errc := make(chan error, len(c.cfg.Endpoints))
|
||||||
|
ctx, cancel := context.WithCancel(c.ctx)
|
||||||
|
if c.cfg.DialTimeout > 0 {
|
||||||
|
ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||||
|
}
|
||||||
|
wg.Add(len(c.cfg.Endpoints))
|
||||||
|
for _, ep := range c.cfg.Endpoints {
|
||||||
|
// if cluster is current, any endpoint gives a recent version
|
||||||
|
go func(e string) {
|
||||||
|
defer wg.Done()
|
||||||
|
resp, rerr := c.Status(ctx, e)
|
||||||
|
if rerr != nil {
|
||||||
|
errc <- rerr
|
||||||
|
return
|
||||||
|
}
|
||||||
|
vs := strings.Split(resp.Version, ".")
|
||||||
|
maj, min := 0, 0
|
||||||
|
if len(vs) >= 2 {
|
||||||
|
maj, rerr = strconv.Atoi(vs[0])
|
||||||
|
min, rerr = strconv.Atoi(vs[1])
|
||||||
|
}
|
||||||
|
if maj < 3 || (maj == 3 && min < 2) {
|
||||||
|
rerr = ErrOldCluster
|
||||||
|
}
|
||||||
|
errc <- rerr
|
||||||
|
}(ep)
|
||||||
|
}
|
||||||
|
// wait for success
|
||||||
|
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||||
|
if err = <-errc; err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cancel()
|
||||||
|
wg.Wait()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// ActiveConnection returns the current in-use connection
|
// ActiveConnection returns the current in-use connection
|
||||||
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
|
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
|
||||||
|
|
||||||
|
|
|
||||||
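The new checkVersion gate above queries Status on every endpoint and accepts the cluster as soon as one endpoint reports a version of at least 3.2. A self-contained sketch of that comparison follows; the helper name and its treatment of parse errors are mine, not part of the diff:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// atLeast32 mirrors the check in checkVersion: split the reported
// "major.minor.patch" string and reject anything older than 3.2.
// Unparseable versions are simply reported as too old here.
func atLeast32(version string) bool {
	vs := strings.Split(version, ".")
	if len(vs) < 2 {
		return false
	}
	maj, majErr := strconv.Atoi(vs[0])
	min, minErr := strconv.Atoi(vs[1])
	if majErr != nil || minErr != nil {
		return false
	}
	return maj > 3 || (maj == 3 && min >= 2)
}

func main() {
	for _, v := range []string{"3.1.1", "3.2.0", "2.3.8"} {
		fmt.Printf("%s -> at least 3.2: %v\n", v, atLeast32(v))
	}
}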
|
|
@@ -18,6 +18,7 @@ import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@@ -41,6 +42,13 @@ type Config struct {
|
||||||
// Password is a password for authentication.
|
// Password is a password for authentication.
|
||||||
Password string `json:"password"`
|
Password string `json:"password"`
|
||||||
|
|
||||||
|
// RejectOldCluster when set will refuse to create a client against an outdated cluster.
|
||||||
|
RejectOldCluster bool `json:"reject-old-cluster"`
|
||||||
|
|
||||||
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
|
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
|
||||||
DialOptions []grpc.DialOption
|
DialOptions []grpc.DialOption
|
||||||
|
|
||||||
|
// Context is the default client context; it can be used to cancel grpc dial out and
|
||||||
|
// other operations that do not have an explicit context.
|
||||||
|
Context context.Context
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
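A minimal usage sketch of the two new Config fields added above (RejectOldCluster and Context). This is illustrative only and not part of the commit; the import path matches the vendored package here, but the endpoint address is an assumption:

package main

import (
    "log"
    "time"

    "golang.org/x/net/context"

    "github.com/coreos/etcd/clientv3"
)

func main() {
    // Base context that can cancel grpc dial-out and other background operations.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    cli, err := clientv3.New(clientv3.Config{
        Endpoints:        []string{"http://10.0.0.1:2379"}, // hypothetical endpoint
        DialTimeout:      5 * time.Second,
        Context:          ctx,
        RejectOldCluster: true, // runs checkVersion(); fails if no member reports etcd >= 3.2
    })
    if err != nil {
        // err is clientv3.ErrOldCluster when the version check rejects the cluster
        log.Fatal(err)
    }
    defer cli.Close()
}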
@@ -144,16 +144,19 @@ type keepAlive struct {
 }
 
 func NewLease(c *Client) Lease {
+    return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
+}
+
+func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
     l := &lessor{
         donec:      make(chan struct{}),
         keepAlives: make(map[LeaseID]*keepAlive),
-        remote:     RetryLeaseClient(c),
-        firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
+        remote:     remote,
+        firstKeepAliveTimeout: keepAliveTimeout,
     }
     if l.firstKeepAliveTimeout == time.Second {
         l.firstKeepAliveTimeout = defaultTTL
     }
 
     l.stopCtx, l.stopCancel = context.WithCancel(context.Background())
     return l
 }

@@ -407,7 +410,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
     }
 
     // send update to all channels
-    nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
+    nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
     ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
     for _, ch := range ka.chs {
         select {
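To make the recvKeepAlive change above concrete, here is a small standalone illustration (not from the commit) of the two scheduling expressions with a hypothetical 5-second lease TTL — the old form adds only one nanosecond plus TTL/3 truncated to whole seconds, while the new form takes one third of the full TTL duration:

package main

import (
    "fmt"
    "time"
)

func main() {
    ttl := int64(5) // lease TTL in seconds, as in karesp.TTL

    // old: the untyped constant 1 becomes 1ns, and ttl/3 truncates to 1
    before := 1 + time.Duration(ttl/3)*time.Second
    // new: divide the full TTL duration by three
    after := (time.Duration(ttl) * time.Second) / 3.0

    fmt.Println(before) // 1.000000001s
    fmt.Println(after)  // 1.666666666s
}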
@@ -21,6 +21,7 @@ import (
     "encoding/binary"
     "fmt"
     "net"
+    "sort"
     "syscall"
 
     "github.com/coreos/etcd/pkg/cpuutil"

@@ -38,13 +39,35 @@ func GetDefaultHost() (string, error) {
         return "", rerr
     }
 
-    for family, rmsg := range rmsgs {
-        host, oif, err := parsePREFSRC(rmsg)
-        if err != nil {
-            return "", err
-        }
-        if host != "" {
-            return host, nil
+    // prioritize IPv4
+    if rmsg, ok := rmsgs[syscall.AF_INET]; ok {
+        if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil {
+            return host, err
+        }
+        delete(rmsgs, syscall.AF_INET)
+    }
+
+    // sort so choice is deterministic
+    var families []int
+    for family := range rmsgs {
+        families = append(families, int(family))
+    }
+    sort.Ints(families)
+
+    for _, f := range families {
+        family := uint8(f)
+        if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
+            return host, err
+        }
+    }
+
+    return "", errNoDefaultHost
+}
+
+func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) {
+    host, oif, err := parsePREFSRC(rmsg)
+    if host != "" || err != nil {
+        return host, err
     }
 
     // prefsrc not detected, fall back to getting address from iface

@@ -64,9 +87,8 @@ func GetDefaultHost() (string, error) {
             return net.IP(attr.Value).String(), nil
         }
     }
-    }
 
-    return "", errNoDefaultHost
+    return "", nil
 }
 
 func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) {
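A rough sketch (not part of the vendored code) of the selection order the rewritten GetDefaultHost follows — IPv4 first, then the remaining address families in sorted order so the result is deterministic across runs; the route map below is hypothetical:

package main

import (
    "fmt"
    "sort"
    "syscall"
)

func main() {
    // hypothetical default-route hosts keyed by address family
    routes := map[uint8]string{
        syscall.AF_INET6: "fe80::1",
        syscall.AF_INET:  "10.0.0.1",
    }

    // prioritize IPv4
    if host, ok := routes[syscall.AF_INET]; ok {
        fmt.Println(host)
        delete(routes, syscall.AF_INET)
    }

    // sort the remaining families so iteration order is deterministic
    var families []int
    for f := range routes {
        families = append(families, int(f))
    }
    sort.Ints(families)
    for _, f := range families {
        fmt.Println(routes[uint8(f)])
    }
}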
@@ -196,7 +196,7 @@ type horizontalAxis struct {
 
 // size returns the height of the axis.
 func (a *horizontalAxis) size() (h vg.Length) {
-    if a.Label.Text != "" {
+    if a.Label.Text != "" { // We assume that the label isn't rotated.
         h -= a.Label.Font.Extents().Descent
         h += a.Label.Height(a.Label.Text)
     }

@@ -258,13 +258,9 @@ func (a *horizontalAxis) GlyphBoxes(*Plot) (boxes []GlyphBox) {
         if t.IsMinor() {
             continue
         }
-        w := a.Tick.Label.Width(t.Label)
         box := GlyphBox{
             X: a.Norm(t.Value),
-            Rectangle: vg.Rectangle{
-                Min: vg.Point{X: -w / 2},
-                Max: vg.Point{X: w / 2},
-            },
+            Rectangle: a.Tick.Label.Rectangle(t.Label),
         }
         boxes = append(boxes, box)
     }

@@ -278,7 +274,7 @@ type verticalAxis struct {
 
 // size returns the width of the axis.
 func (a *verticalAxis) size() (w vg.Length) {
-    if a.Label.Text != "" {
+    if a.Label.Text != "" { // We assume that the label isn't rotated.
         w -= a.Label.Font.Extents().Descent
         w += a.Label.Height(a.Label.Text)
     }

@@ -343,13 +339,9 @@ func (a *verticalAxis) GlyphBoxes(*Plot) (boxes []GlyphBox) {
         if t.IsMinor() {
             continue
         }
-        h := a.Tick.Label.Height(t.Label)
         box := GlyphBox{
             Y: a.Norm(t.Value),
-            Rectangle: vg.Rectangle{
-                Min: vg.Point{Y: -h / 2},
-                Max: vg.Point{Y: h / 2},
-            },
+            Rectangle: a.Tick.Label.Rectangle(t.Label),
         }
         boxes = append(boxes, box)
     }
@@ -2,6 +2,7 @@ package api
 
 import (
     "bytes"
+    "context"
     "crypto/tls"
     "crypto/x509"
     "encoding/json"

@@ -79,6 +80,11 @@ type QueryOptions struct {
     // metadata key/value pairs. Currently, only one key/value pair can
     // be provided for filtering.
     NodeMeta map[string]string
+
+    // RelayFactor is used in keyring operations to cause reponses to be
+    // relayed back to the sender through N other random nodes. Must be
+    // a value from 0 to 5 (inclusive).
+    RelayFactor uint8
 }
 
 // WriteOptions are used to parameterize a write

@@ -90,6 +96,11 @@ type WriteOptions struct {
     // Token is used to provide a per-request ACL token
     // which overrides the agent's default token.
     Token string
+
+    // RelayFactor is used in keyring operations to cause reponses to be
+    // relayed back to the sender through N other random nodes. Must be
+    // a value from 0 to 5 (inclusive).
+    RelayFactor uint8
 }
 
 // QueryMeta is used to return meta data about a query

@@ -336,14 +347,23 @@ func NewClient(config *Config) (*Client, error) {
         config.HttpClient = defConfig.HttpClient
     }
 
-    if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
+    parts := strings.SplitN(config.Address, "://", 2)
+    if len(parts) == 2 {
+        switch parts[0] {
+        case "http":
+        case "https":
+            config.Scheme = "https"
+        case "unix":
             trans := cleanhttp.DefaultTransport()
-            trans.Dial = func(_, _ string) (net.Conn, error) {
+            trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
                 return net.Dial("unix", parts[1])
             }
             config.HttpClient = &http.Client{
                 Transport: trans,
             }
+        default:
+            return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0])
+        }
         config.Address = parts[1]
     }

@@ -396,6 +416,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
             r.params.Add("node-meta", key+":"+value)
         }
     }
+    if q.RelayFactor != 0 {
+        r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+    }
 }
 
 // durToMsec converts a duration to a millisecond specified string. If the

@@ -437,6 +460,9 @@ func (r *request) setWriteOptions(q *WriteOptions) {
     if q.Token != "" {
         r.header.Set("X-Consul-Token", q.Token)
     }
+    if q.RelayFactor != 0 {
+        r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+    }
 }
 
 // toHTTP converts the request to an HTTP request
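A minimal usage sketch (not from the commit) of the new address-scheme handling in NewClient: the Address may now carry an explicit http://, https://, or unix:// prefix, and the scheme is split off before dialing. The addresses below are hypothetical:

package main

import (
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    cfg := api.DefaultConfig()
    cfg.Address = "https://10.0.0.1:8501" // scheme is stripped and sets cfg.Scheme = "https"
    // cfg.Address = "unix:///var/run/consul.sock" // would dial the unix socket instead

    client, err := api.NewClient(cfg)
    if err != nil {
        log.Fatal(err)
    }
    _ = client
}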