*: rename, update images

This commit is contained in:
Gyu-Ho Lee 2017-02-07 10:24:26 -08:00
parent 1b6f6b8ed9
commit ebfa0f8a31
8 changed files with 17 additions and 11 deletions

View File

@ -439,7 +439,7 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int6
sort.Sort(keyNumAndMemorys(tslice))
// aggregate memory by number of keys
knms := processTimeSeries(tslice, 1000, totalRequests)
knms := findRangesMemory(tslice, 1000, totalRequests)
ckk1 := dataframe.NewColumn("KEYS")
ckk2 := dataframe.NewColumn("MIN-VMRSS-MB")
ckk3 := dataframe.NewColumn("AVG-VMRSS-MB")

View File

@ -16,7 +16,7 @@ package analyze
import "sort"
func processTimeSeries(tslice []keyNumAndMemory, unit int64, totalRequests int64) []keyNumAndMemory {
func findRangesMemory(tslice []keyNumAndMemory, unit int64, totalRequests int64) []keyNumAndMemory {
sort.Sort(keyNumAndMemorys(tslice))
cumulKeyN := int64(0)
@ -55,10 +55,16 @@ func processTimeSeries(tslice []keyNumAndMemory, unit int64, totalRequests int64
}
kss := []keyNumAndMemory{}
delete(rm, 0)
delete(rm, 0) // drop data at beginning
for k, v := range rm {
// make sure to use 'k' as keyNum
kss = append(kss, keyNumAndMemory{keyNum: k, minMemoryMB: v.minMemoryMB, avgMemoryMB: v.avgMemoryMB, maxMemoryMB: v.maxMemoryMB})
kss = append(kss, keyNumAndMemory{
keyNum: k,
minMemoryMB: v.minMemoryMB,
avgMemoryMB: v.avgMemoryMB,
maxMemoryMB: v.maxMemoryMB,
})
}
sort.Sort(keyNumAndMemorys(kss))

View File

@ -19,7 +19,7 @@ import (
"testing"
)
func Test_processTimeSeries(t *testing.T) {
func Test_findRangesMemory(t *testing.T) {
var tslice []keyNumAndMemory
for i := int64(0); i < 10; i++ {
dp := keyNumAndMemory{
@ -29,7 +29,7 @@ func Test_processTimeSeries(t *testing.T) {
tslice = append(tslice, dp)
}
pss := processTimeSeries(tslice, 20, 555)
pss := findRangesMemory(tslice, 20, 555)
expexcted := []keyNumAndMemory{
{keyNum: 20, avgMemoryMB: 1},
{keyNum: 40, avgMemoryMB: 1},

View File

@ -21,7 +21,7 @@ import (
"github.com/coreos/dbtester/pkg/report"
)
// processTimeSeries sorts all data points by its timestamp.
// findRangesLatency sorts all data points by its timestamp.
// And then aggregate by the cumulative throughput,
// in order to map the number of keys to the average latency.
//
@ -36,7 +36,7 @@ import (
// If unit is 1000 and the average throughput per second is 30,000
// and its average latency is 10ms, it will have 30 data points with
// latency 10ms.
func processTimeSeries(tss report.TimeSeries, unit int64, totalRequests int64) keyNumToAvgLatencys {
func findRangesLatency(tss report.TimeSeries, unit int64, totalRequests int64) keyNumToAvgLatencys {
sort.Sort(tss)
cumulKeyN := int64(0)

View File

@ -22,7 +22,7 @@ import (
"github.com/coreos/dbtester/pkg/report"
)
func Test_processTimeSeries(t *testing.T) {
func Test_findRangesLatency(t *testing.T) {
var tslice report.TimeSeries
for i := int64(0); i < 10; i++ {
dp := report.DataPoint{
@ -33,7 +33,7 @@ func Test_processTimeSeries(t *testing.T) {
tslice = append(tslice, dp)
}
pss := processTimeSeries(tslice, 20, 555)
pss := findRangesLatency(tslice, 20, 555)
expexcted := []keyNumToAvgLatency{
{keyNum: 20, avgLat: 1},
{keyNum: 40, avgLat: 1},

View File

@ -258,7 +258,7 @@ func (cfg *Config) saveDataLatencyThroughputTimeseries(gcfg TestGroup, st report
}
// aggregate latency by the number of keys
tss := processTimeSeries(st.TimeSeries, 1000, gcfg.RequestNumber)
tss := findRangesLatency(st.TimeSeries, 1000, gcfg.RequestNumber)
ctt1 := dataframe.NewColumn("KEYS")
ctt2 := dataframe.NewColumn("MIN-LATENCY-MS")
ctt3 := dataframe.NewColumn("AVG-LATENCY-MS")

Binary file not shown.

Before

Width:  |  Height:  |  Size: 73 KiB

After

Width:  |  Height:  |  Size: 73 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 62 KiB

After

Width:  |  Height:  |  Size: 73 KiB