diff --git a/analyze/04_aggregate_all_analyze_data.go b/analyze/04_aggregate_all_analyze_data.go
index b1cc466a..d18f5807 100644
--- a/analyze/04_aggregate_all_analyze_data.go
+++ b/analyze/04_aggregate_all_analyze_data.go
@@ -439,7 +439,7 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int6
 	sort.Sort(keyNumAndMemorys(tslice))
 
 	// aggregate memory by number of keys
-	knms := processTimeSeries(tslice, 1000, totalRequests)
+	knms := findRangesMemory(tslice, 1000, totalRequests)
 	ckk1 := dataframe.NewColumn("KEYS")
 	ckk2 := dataframe.NewColumn("MIN-VMRSS-MB")
 	ckk3 := dataframe.NewColumn("AVG-VMRSS-MB")
diff --git a/analyze/process_timeseries.go b/analyze/find_ranges_memory.go
similarity index 86%
rename from analyze/process_timeseries.go
rename to analyze/find_ranges_memory.go
index 12481d38..b11b1144 100644
--- a/analyze/process_timeseries.go
+++ b/analyze/find_ranges_memory.go
@@ -16,7 +16,7 @@ package analyze
 
 import "sort"
 
-func processTimeSeries(tslice []keyNumAndMemory, unit int64, totalRequests int64) []keyNumAndMemory {
+func findRangesMemory(tslice []keyNumAndMemory, unit int64, totalRequests int64) []keyNumAndMemory {
 	sort.Sort(keyNumAndMemorys(tslice))
 
 	cumulKeyN := int64(0)
@@ -55,10 +55,16 @@ func processTimeSeries(tslice []keyNumAndMemory, unit int64, totalRequests int64
 	}
 
 	kss := []keyNumAndMemory{}
-	delete(rm, 0)
+	delete(rm, 0) // drop data at beginning
+
 	for k, v := range rm {
 		// make sure to use 'k' as keyNum
-		kss = append(kss, keyNumAndMemory{keyNum: k, minMemoryMB: v.minMemoryMB, avgMemoryMB: v.avgMemoryMB, maxMemoryMB: v.maxMemoryMB})
+		kss = append(kss, keyNumAndMemory{
+			keyNum:      k,
+			minMemoryMB: v.minMemoryMB,
+			avgMemoryMB: v.avgMemoryMB,
+			maxMemoryMB: v.maxMemoryMB,
+		})
 	}
 	sort.Sort(keyNumAndMemorys(kss))
 
diff --git a/analyze/process_timeseries_test.go b/analyze/find_ranges_memory_test.go
similarity index 95%
rename from analyze/process_timeseries_test.go
rename to analyze/find_ranges_memory_test.go
index 845fbde6..dc0d0977 100644
--- a/analyze/process_timeseries_test.go
+++ b/analyze/find_ranges_memory_test.go
@@ -19,7 +19,7 @@ import (
 	"testing"
 )
 
-func Test_processTimeSeries(t *testing.T) {
+func Test_findRangesMemory(t *testing.T) {
 	var tslice []keyNumAndMemory
 	for i := int64(0); i < 10; i++ {
 		dp := keyNumAndMemory{
@@ -29,7 +29,7 @@ func Test_processTimeSeries(t *testing.T) {
 		tslice = append(tslice, dp)
 	}
 
-	pss := processTimeSeries(tslice, 20, 555)
+	pss := findRangesMemory(tslice, 20, 555)
 	expexcted := []keyNumAndMemory{
 		{keyNum: 20, avgMemoryMB: 1},
 		{keyNum: 40, avgMemoryMB: 1},
diff --git a/stress_report_aggregate.go b/find_ranges_latency.go
similarity index 95%
rename from stress_report_aggregate.go
rename to find_ranges_latency.go
index 18f4c193..5958c6e4 100644
--- a/stress_report_aggregate.go
+++ b/find_ranges_latency.go
@@ -21,7 +21,7 @@ import (
 	"github.com/coreos/dbtester/pkg/report"
 )
 
-// processTimeSeries sorts all data points by its timestamp.
+// findRangesLatency sorts all data points by its timestamp.
 // And then aggregate by the cumulative throughput,
 // in order to map the number of keys to the average latency.
 //
@@ -36,7 +36,7 @@ import (
 // If unis is 1000 and the average throughput per second is 30,000
 // and its average latency is 10ms, it will have 30 data points with
 // latency 10ms.
-func processTimeSeries(tss report.TimeSeries, unit int64, totalRequests int64) keyNumToAvgLatencys {
+func findRangesLatency(tss report.TimeSeries, unit int64, totalRequests int64) keyNumToAvgLatencys {
 	sort.Sort(tss)
 
 	cumulKeyN := int64(0)
diff --git a/stress_report_aggregate_test.go b/find_ranges_latency_test.go
similarity index 95%
rename from stress_report_aggregate_test.go
rename to find_ranges_latency_test.go
index df2f1a95..66de8c42 100644
--- a/stress_report_aggregate_test.go
+++ b/find_ranges_latency_test.go
@@ -22,7 +22,7 @@ import (
 	"github.com/coreos/dbtester/pkg/report"
 )
 
-func Test_processTimeSeries(t *testing.T) {
+func Test_findRangesLatency(t *testing.T) {
 	var tslice report.TimeSeries
 	for i := int64(0); i < 10; i++ {
 		dp := report.DataPoint{
@@ -33,7 +33,7 @@ func Test_processTimeSeries(t *testing.T) {
 		tslice = append(tslice, dp)
 	}
 
-	pss := processTimeSeries(tslice, 20, 555)
+	pss := findRangesLatency(tslice, 20, 555)
 	expexcted := []keyNumToAvgLatency{
 		{keyNum: 20, avgLat: 1},
 		{keyNum: 40, avgLat: 1},
diff --git a/stress_report_save_upload.go b/stress_report_save_upload.go
index d32712b0..b1897ede 100644
--- a/stress_report_save_upload.go
+++ b/stress_report_save_upload.go
@@ -258,7 +258,7 @@ func (cfg *Config) saveDataLatencyThroughputTimeseries(gcfg TestGroup, st report
 	}
 
 	// aggregate latency by the number of keys
-	tss := processTimeSeries(st.TimeSeries, 1000, gcfg.RequestNumber)
+	tss := findRangesLatency(st.TimeSeries, 1000, gcfg.RequestNumber)
 	ctt1 := dataframe.NewColumn("KEYS")
 	ctt2 := dataframe.NewColumn("MIN-LATENCY-MS")
 	ctt3 := dataframe.NewColumn("AVG-LATENCY-MS")
diff --git a/test-results/2017Q1-00-etcd-zookeeper-consul/2017Q1-00-etcd-zookeeper-consul-latency-3M-by-key.png b/test-results/2017Q1-00-etcd-zookeeper-consul/2017Q1-00-etcd-zookeeper-consul-latency-3M-by-key.png
index 5789b43b..2bf5cc03 100644
Binary files a/test-results/2017Q1-00-etcd-zookeeper-consul/2017Q1-00-etcd-zookeeper-consul-latency-3M-by-key.png and b/test-results/2017Q1-00-etcd-zookeeper-consul/2017Q1-00-etcd-zookeeper-consul-latency-3M-by-key.png differ
diff --git a/test-results/2017Q1-00-etcd-zookeeper-consul/2017Q1-00-etcd-zookeeper-consul-memory-1M-by-key.png b/test-results/2017Q1-00-etcd-zookeeper-consul/2017Q1-00-etcd-zookeeper-consul-memory-1M-by-key.png
index f063ab61..3b56bcb2 100644
Binary files a/test-results/2017Q1-00-etcd-zookeeper-consul/2017Q1-00-etcd-zookeeper-consul-memory-1M-by-key.png and b/test-results/2017Q1-00-etcd-zookeeper-consul/2017Q1-00-etcd-zookeeper-consul-memory-1M-by-key.png differ
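The doc comment carried over into find_ranges_latency.go describes what the renamed findRangesLatency/findRangesMemory helpers do: sort the data points by timestamp, accumulate their throughput, and emit one aggregated row per `unit` keys (1000 in the callers above), each carrying the latency observed while that key range was being written. The standalone sketch below illustrates that bucketing idea only; the dataPoint and keyNumToLatency types, field names, and clamping behavior are simplified assumptions for illustration, not the exact types or logic in the dbtester repository.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// dataPoint is a simplified stand-in for one benchmark observation:
// a timestamp, the number of keys written in that interval, and the
// average latency measured over the interval.
type dataPoint struct {
	timestamp  int64
	throughput int64
	avgLatency time.Duration
}

// keyNumToLatency is a simplified stand-in for the repo's keyNumToAvgLatency.
type keyNumToLatency struct {
	keyNum int64
	avgLat time.Duration
}

// findRangesLatencySketch maps cumulative key counts to average latency:
// every multiple of `unit` keys crossed by a data point gets one row
// carrying that point's latency.
func findRangesLatencySketch(tss []dataPoint, unit, totalRequests int64) []keyNumToLatency {
	// sort by timestamp so the cumulative count grows in wall-clock order
	sort.Slice(tss, func(i, j int) bool { return tss[i].timestamp < tss[j].timestamp })

	type acc struct {
		sum time.Duration
		n   int64
	}
	buckets := make(map[int64]acc)

	cumulKeyN := int64(0)
	for _, dp := range tss {
		prev := cumulKeyN
		cumulKeyN += dp.throughput
		if cumulKeyN > totalRequests {
			cumulKeyN = totalRequests // clamp at the configured request total (assumption)
		}
		// each multiple of `unit` crossed by this point contributes one row
		for b := (prev/unit + 1) * unit; b <= cumulKeyN; b += unit {
			a := buckets[b]
			a.sum += dp.avgLatency
			a.n++
			buckets[b] = a
		}
	}
	delete(buckets, 0) // drop any partial range at the very beginning, as the memory variant does

	out := make([]keyNumToLatency, 0, len(buckets))
	for k, a := range buckets {
		out = append(out, keyNumToLatency{keyNum: k, avgLat: a.sum / time.Duration(a.n)})
	}
	sort.Slice(out, func(i, j int) bool { return out[i].keyNum < out[j].keyNum })
	return out
}

func main() {
	// ten points, each reporting 20 keys at 1ms average latency
	var tss []dataPoint
	for i := int64(0); i < 10; i++ {
		tss = append(tss, dataPoint{timestamp: i, throughput: 20, avgLatency: time.Millisecond})
	}
	fmt.Println(findRangesLatencySketch(tss, 20, 555))
}
```

With the numbers from the doc comment (unit 1000 and a single point of 30,000 keys at 10ms average latency), this sketch yields 30 rows of 10ms each, matching the behavior the comment describes; with the test-style input above (unit 20) it yields rows at 20, 40, ... keys with 1ms latency.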