control: make 'Skip' flag clearer, clean up fields

Author: Gyu-Ho Lee
Date:   2016-11-14 15:29:51 -08:00
Parent: 03899c4e29
Commit: c3136e8e29
5 changed files with 45 additions and 40 deletions


@@ -39,30 +39,31 @@ type Config struct {
 	AgentEndpoints       []string
 	DatabaseEndpoints    []string
+	ResultPathTimeSeries string `yaml:"result_path_time_series"`
+	ResultPathLog        string `yaml:"result_path_log"`
 	// https://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html
 	Step1 struct {
-		Skip                    bool  `yaml:"skip"`
+		SkipStartDatabase       bool  `yaml:"skip_start_database"`
 		ZookeeperMaxClientCnxns int64 `yaml:"zookeeper_max_client_connections"`
 		ZookeeperSnapCount      int64 `yaml:"zookeeper_snap_count"`
 	} `yaml:"step1"`
 	Step2 struct {
-		Skip              bool   `yaml:"skip"`
-		BenchType         string `yaml:"bench_type"`
-		StaleRead         bool   `yaml:"stale_read"`
-		ResultPath        string `yaml:"result_path"`
-		Connections       int    `yaml:"connections"`
-		Clients           int    `yaml:"clients"`
-		KeySize           int    `yaml:"key_size"`
-		SameKey           bool   `yaml:"same_key"`
-		ValueSize         int    `yaml:"value_size"`
-		TotalRequests     int    `yaml:"total_requests"`
-		RequestIntervalMs int    `yaml:"request_interval_ms"`
+		SkipStressDatabase bool   `yaml:"skip_stress_database"`
+		BenchType          string `yaml:"bench_type"`
+		StaleRead          bool   `yaml:"stale_read"`
+		Connections        int    `yaml:"connections"`
+		Clients            int    `yaml:"clients"`
+		KeySize            int    `yaml:"key_size"`
+		SameKey            bool   `yaml:"same_key"`
+		ValueSize          int    `yaml:"value_size"`
+		TotalRequests      int    `yaml:"total_requests"`
+		RequestIntervalMs  int    `yaml:"request_interval_ms"`
 	} `yaml:"step2"`
 	Step3 struct {
-		Skip       bool   `yaml:"skip"`
-		ResultPath string `yaml:"result_path"`
+		SkipStopDatabase bool `yaml:"skip_stop_database"`
 	}
 }
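
For reference, here is a minimal standalone sketch of how the renamed keys decode into struct tags shaped like the ones above. It assumes gopkg.in/yaml.v2, which is not necessarily the decoder this project uses, and the trimmed-down config type is a stand-in rather than the real Config:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// config is a trimmed-down stand-in for the Config type above, keeping only
// the renamed skip flags and the two result-path fields.
type config struct {
	ResultPathTimeSeries string `yaml:"result_path_time_series"`
	ResultPathLog        string `yaml:"result_path_log"`
	Step1                struct {
		SkipStartDatabase bool `yaml:"skip_start_database"`
	} `yaml:"step1"`
	Step2 struct {
		SkipStressDatabase bool `yaml:"skip_stress_database"`
	} `yaml:"step2"`
	Step3 struct {
		SkipStopDatabase bool `yaml:"skip_stop_database"`
	} `yaml:"step3"`
}

func main() {
	doc := []byte(`result_path_time_series: timeseries.csv
result_path_log: result.log
step1:
  skip_start_database: false
step2:
  skip_stress_database: true
step3:
  skip_stop_database: false
`)

	var c config
	if err := yaml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	// Prints: timeseries.csv result.log false true false
	fmt.Println(c.ResultPathTimeSeries, c.ResultPathLog,
		c.Step1.SkipStartDatabase, c.Step2.SkipStressDatabase, c.Step3.SkipStopDatabase)
}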


@@ -51,8 +51,15 @@ func TestReadConfig(t *testing.T) {
 	if c.GoogleCloudStorageSubDirectory != "2016041501" {
 		t.Fatalf("unexpected %s", c.GoogleCloudStorageSubDirectory)
 	}
-	if c.Step1.Skip {
-		t.Fatalf("unexpected %v", c.Step1.Skip)
+	if c.ResultPathTimeSeries != "timeseries.csv" {
+		t.Fatalf("unexpected %s", c.ResultPathTimeSeries)
+	}
+	if c.ResultPathLog != "result.log" {
+		t.Fatalf("unexpected %v", c.ResultPathLog)
+	}
+	if c.Step1.SkipStartDatabase {
+		t.Fatalf("unexpected %v", c.Step1.SkipStartDatabase)
 	}
 	if c.Step1.ZookeeperMaxClientCnxns != 5000 {
 		t.Fatalf("unexpected %d", c.Step1.ZookeeperMaxClientCnxns)
@@ -60,15 +67,13 @@ func TestReadConfig(t *testing.T) {
 	if c.Step1.ZookeeperSnapCount != 100000 {
 		t.Fatalf("unexpected %d", c.Step1.ZookeeperSnapCount)
 	}
-	if c.Step2.Skip {
-		t.Fatalf("unexpected %v", c.Step2.Skip)
+	if c.Step2.SkipStressDatabase {
+		t.Fatalf("unexpected %v", c.Step2.SkipStressDatabase)
 	}
 	if c.Step2.BenchType != "write" {
 		t.Fatalf("unexpected %s", c.Step2.BenchType)
 	}
-	if c.Step2.ResultPath != "timeseries.csv" {
-		t.Fatalf("unexpected %s", c.Step2.ResultPath)
-	}
 	if c.Step2.KeySize != 8 {
 		t.Fatalf("unexpected %d", c.Step2.KeySize)
 	}
@@ -87,10 +92,8 @@ func TestReadConfig(t *testing.T) {
 	if c.Step2.RequestIntervalMs != 100 {
 		t.Fatalf("unexpected %d", c.Step2.RequestIntervalMs)
 	}
-	if c.Step3.Skip {
-		t.Fatalf("unexpected %v", c.Step3.Skip)
-	}
-	if c.Step3.ResultPath != "result.log" {
-		t.Fatalf("unexpected %v", c.Step3.ResultPath)
+	if c.Step3.SkipStopDatabase {
+		t.Fatalf("unexpected %v", c.Step3.SkipStopDatabase)
 	}
 }


@@ -58,7 +58,7 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
 	default:
 		return fmt.Errorf("%q is not supported", cfg.Database)
 	}
-	if !cfg.Step2.Skip {
+	if !cfg.Step2.SkipStressDatabase {
 		switch cfg.Step2.BenchType {
 		case "write":
 		case "read":
@@ -85,14 +85,14 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
 	}
 	println()
-	if !cfg.Step1.Skip {
+	if !cfg.Step1.SkipStartDatabase {
 		plog.Info("step 1: starting databases...")
 		if err = step1(cfg); err != nil {
 			return err
 		}
 	}
-	if !cfg.Step2.Skip {
+	if !cfg.Step2.SkipStressDatabase {
 		println()
 		time.Sleep(5 * time.Second)
 		plog.Info("step 2: starting tests...")
@@ -101,7 +101,7 @@ func CommandFunc(cmd *cobra.Command, args []string) error {
 		}
 	}
-	if !cfg.Step3.Skip {
+	if !cfg.Step3.SkipStopDatabase {
 		println()
 		time.Sleep(5 * time.Second)
 		plog.Info("step 3: stopping databases...")


@@ -157,7 +157,7 @@ func (r *report) printSecondSample() {
 	plog.Println("getTimeSeries finished for", len(r.sps.tm), "points")
 	fmt.Println(txt)
-	if err := toFile(txt, cfg.Step2.ResultPath); err != nil {
+	if err := toFile(txt, cfg.ResultPathTimeSeries); err != nil {
 		plog.Fatal(err)
 	}
@@ -167,8 +167,8 @@ func (r *report) printSecondSample() {
 		plog.Fatal(err)
 	}
-	srcCSVResultPath := cfg.Step2.ResultPath
-	dstCSVResultPath := filepath.Base(cfg.Step2.ResultPath)
+	srcCSVResultPath := cfg.ResultPathTimeSeries
+	dstCSVResultPath := filepath.Base(cfg.ResultPathTimeSeries)
 	if !strings.HasPrefix(dstCSVResultPath, cfg.TestName) {
 		dstCSVResultPath = fmt.Sprintf("%s-%s", cfg.TestName, dstCSVResultPath)
 	}
@@ -192,8 +192,8 @@ func (r *report) printSecondSample() {
 		plog.Fatal(err)
 	}
-	srcCSVResultPath := cfg.Step3.ResultPath
-	dstCSVResultPath := filepath.Base(cfg.Step3.ResultPath)
+	srcCSVResultPath := cfg.ResultPathLog
+	dstCSVResultPath := filepath.Base(cfg.ResultPathLog)
 	if !strings.HasPrefix(dstCSVResultPath, cfg.TestName) {
 		dstCSVResultPath = fmt.Sprintf("%s-%s", cfg.TestName, dstCSVResultPath)
 	}
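
One small note on the upload naming in the hunk above: the destination name is just the base name of the local result path, prefixed with the test name when that prefix is missing. A standalone sketch of the derivation, with made-up values standing in for cfg.TestName and cfg.ResultPathTimeSeries:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Made-up values for illustration only; the real code reads them from
	// cfg.TestName and cfg.ResultPathTimeSeries (or cfg.ResultPathLog).
	testName := "2016041501-etcd"
	src := "/home/tester/timeseries.csv"

	dst := filepath.Base(src)
	if !strings.HasPrefix(dst, testName) {
		dst = fmt.Sprintf("%s-%s", testName, dst)
	}
	fmt.Println(dst) // 2016041501-etcd-timeseries.csv
}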


@@ -14,18 +14,20 @@ peer_ips:
 agent_port: 3500
 database_port: 2379
+result_path_time_series: timeseries.csv
+result_path_log: result.log
 # start database by sending RPC calls to agents
 step1:
-  skip: false
+  skip_start_database: false
   zookeeper_max_client_connections: 5000
   zookeeper_snap_count: 100000
 # start benchmark
 step2:
-  skip: false
+  skip_stress_database: false
   bench_type: write
   stale_read: true
-  result_path: timeseries.csv
   connections: 100
   clients: 100
   key_size: 8
@@ -36,5 +38,4 @@ step2:
 # after benchmark
 step3:
-  skip: false
-  result_path: result.log
+  skip_stop_database: false