*: rename, clean up

Gyu-Ho Lee 2017-02-07 12:15:49 -08:00
parent ebfa0f8a31
commit ad579290da
8 changed files with 332 additions and 336 deletions


@@ -16,7 +16,6 @@ package analyze
import (
"fmt"
"sort"
"strings"
"github.com/gyuho/dataframe"
@@ -436,7 +435,6 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int6
}
tslice = append(tslice, point)
}
sort.Sort(keyNumAndMemorys(tslice))
// aggregate memory by number of keys
knms := findRangesMemory(tslice, 1000, totalRequests)


@@ -1,86 +0,0 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analyze
import "sort"
func findRangesMemory(tslice []keyNumAndMemory, unit int64, totalRequests int64) []keyNumAndMemory {
sort.Sort(keyNumAndMemorys(tslice))
cumulKeyN := int64(0)
maxKey := int64(0)
rm := make(map[int64]keyNumAndMemory)
// this data is aggregated by second
// and we want to map number of keys to latency
// so the range is the key
// and the value is the cumulative throughput
for _, ts := range tslice {
cumulKeyN += ts.keyNum
if cumulKeyN < unit {
// not enough data points yet
continue
}
mem := ts
// cumulKeyN >= unit
for cumulKeyN > maxKey {
maxKey += unit
rm[maxKey] = mem
}
}
// fill-in empty rows
for i := maxKey; i < int64(totalRequests); i += unit {
if _, ok := rm[i]; !ok {
rm[i] = keyNumAndMemory{}
}
}
if _, ok := rm[int64(totalRequests)]; !ok {
rm[int64(totalRequests)] = keyNumAndMemory{}
}
kss := []keyNumAndMemory{}
delete(rm, 0) // drop data at beginning
for k, v := range rm {
// make sure to use 'k' as keyNum
kss = append(kss, keyNumAndMemory{
keyNum: k,
minMemoryMB: v.minMemoryMB,
avgMemoryMB: v.avgMemoryMB,
maxMemoryMB: v.maxMemoryMB,
})
}
sort.Sort(keyNumAndMemorys(kss))
return kss
}
type keyNumAndMemory struct {
keyNum int64
minMemoryMB float64
avgMemoryMB float64
maxMemoryMB float64
}
type keyNumAndMemorys []keyNumAndMemory
func (t keyNumAndMemorys) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t keyNumAndMemorys) Len() int { return len(t) }
func (t keyNumAndMemorys) Less(i, j int) bool { return t[i].keyNum < t[j].keyNum }


@@ -1,71 +0,0 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analyze
import (
"reflect"
"testing"
)
func Test_findRangesMemory(t *testing.T) {
var tslice []keyNumAndMemory
for i := int64(0); i < 10; i++ {
dp := keyNumAndMemory{
keyNum: 50,
avgMemoryMB: float64(i + 1),
}
tslice = append(tslice, dp)
}
pss := findRangesMemory(tslice, 20, 555)
expected := []keyNumAndMemory{
{keyNum: 20, avgMemoryMB: 1},
{keyNum: 40, avgMemoryMB: 1},
{keyNum: 60, avgMemoryMB: 1},
{keyNum: 80, avgMemoryMB: 2},
{keyNum: 100, avgMemoryMB: 2},
{keyNum: 120, avgMemoryMB: 3},
{keyNum: 140, avgMemoryMB: 3},
{keyNum: 160, avgMemoryMB: 3},
{keyNum: 180, avgMemoryMB: 4},
{keyNum: 200, avgMemoryMB: 4},
{keyNum: 220, avgMemoryMB: 5},
{keyNum: 240, avgMemoryMB: 5},
{keyNum: 260, avgMemoryMB: 5},
{keyNum: 280, avgMemoryMB: 6},
{keyNum: 300, avgMemoryMB: 6},
{keyNum: 320, avgMemoryMB: 7},
{keyNum: 340, avgMemoryMB: 7},
{keyNum: 360, avgMemoryMB: 7},
{keyNum: 380, avgMemoryMB: 8},
{keyNum: 400, avgMemoryMB: 8},
{keyNum: 420, avgMemoryMB: 9},
{keyNum: 440, avgMemoryMB: 9},
{keyNum: 460, avgMemoryMB: 9},
{keyNum: 480, avgMemoryMB: 10},
{keyNum: 500, avgMemoryMB: 10},
{keyNum: 520, avgMemoryMB: 0},
{keyNum: 540, avgMemoryMB: 0},
{keyNum: 555, avgMemoryMB: 0},
}
if len(pss) != len(expected) {
t.Fatalf("expected %+v, got %+v", expected, pss)
}
for i, elem := range pss {
if !reflect.DeepEqual(elem, expected[i]) {
t.Fatalf("#%d: processed data point expected %+v, got %+v", i, expected[i], elem)
}
}
}

find_ranges.go (new file, 201 lines)

@@ -0,0 +1,201 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbtester
import (
"sort"
"time"
"github.com/coreos/dbtester/pkg/report"
)
// CumulativeKeyNumToAvgLatency wraps the cumulative number of keys and
// the corresponding latency data; the higher 'CumulativeKeyNum' is, the
// later the data point appears in the time series.
type CumulativeKeyNumToAvgLatency struct {
CumulativeKeyNum int64
MinLatency time.Duration
AvgLatency time.Duration
MaxLatency time.Duration
}
// CumulativeKeyNumToAvgLatencySlice is a slice of CumulativeKeyNumToAvgLatency.
type CumulativeKeyNumToAvgLatencySlice []CumulativeKeyNumToAvgLatency
func (t CumulativeKeyNumToAvgLatencySlice) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t CumulativeKeyNumToAvgLatencySlice) Len() int { return len(t) }
func (t CumulativeKeyNumToAvgLatencySlice) Less(i, j int) bool {
return t[i].CumulativeKeyNum < t[j].CumulativeKeyNum
}
// FindRangesLatency sorts all data points by timestamp, then aggregates
// them by cumulative throughput, in order to map the number of keys to
// the average latency.
//
// type DataPoint struct {
// Timestamp int64
// MinLatency time.Duration
// AvgLatency time.Duration
// MaxLatency time.Duration
// ThroughPut int64
// }
//
// If unit is 1000, the average throughput per second is 30,000, and the
// average latency is 10ms, this yields 30 data points with latency 10ms.
func FindRangesLatency(data report.TimeSeries, unit int64, totalRequests int64) CumulativeKeyNumToAvgLatencySlice {
// need to sort by timestamps because we want the 'cumulative' trends as
// we write more keys; 'report.TimeSeries' already implements the sort
// interface, so just sort.Sort(data)
sort.Sort(data)
cumulKeyN := int64(0)
maxKey := int64(0)
rm := make(map[int64]CumulativeKeyNumToAvgLatency)
// this data is aggregated by second, and we want to map the number of
// keys to latency; each map key is a cumulative key-count boundary, and
// its value is the latency observed once that many keys had been written
for _, ts := range data {
cumulKeyN += ts.ThroughPut
if cumulKeyN < unit {
// not enough data points yet
continue
}
// cumulKeyN >= unit
for cumulKeyN > maxKey {
maxKey += unit
rm[maxKey] = CumulativeKeyNumToAvgLatency{
MinLatency: ts.MinLatency,
AvgLatency: ts.AvgLatency,
MaxLatency: ts.MaxLatency,
}
}
}
// fill-in empty rows
for i := maxKey; i < totalRequests; i += unit {
if _, ok := rm[i]; !ok {
rm[i] = CumulativeKeyNumToAvgLatency{}
}
}
if _, ok := rm[totalRequests]; !ok {
rm[totalRequests] = CumulativeKeyNumToAvgLatency{}
}
kss := []CumulativeKeyNumToAvgLatency{}
delete(rm, 0)
for k, v := range rm {
// make sure to use 'k' as CumulativeKeyNum
kss = append(kss, CumulativeKeyNumToAvgLatency{
CumulativeKeyNum: k,
MinLatency: v.MinLatency,
AvgLatency: v.AvgLatency,
MaxLatency: v.MaxLatency,
})
}
// sort by cumulative throughput (number of keys)
// in ascending order
sort.Sort(CumulativeKeyNumToAvgLatencySlice(kss))
return kss
}
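// exampleFindRangesLatency is an illustrative sketch, not part of this
// commit: mirroring TestFindRangesLatency below, it feeds ten one-second
// samples (50 keys each, latency growing by one unit per sample) into
// the exported function.
func exampleFindRangesLatency() CumulativeKeyNumToAvgLatencySlice {
	var data report.TimeSeries
	for i := int64(0); i < 10; i++ {
		data = append(data, report.DataPoint{
			Timestamp:  i,
			AvgLatency: time.Duration(i + 1), // 1ns, 2ns, ...
			ThroughPut: 50,                   // 50 keys written this second
		})
	}
	// bucket the 500 cumulative keys into ranges of 20, zero-padded up to 555 total requests
	return FindRangesLatency(data, 20, 555)
}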
// CumulativeKeyNumAndMemory wraps the cumulative number of keys and the
// corresponding memory data; the higher 'CumulativeKeyNum' is, the later
// the data point appears in the time series.
type CumulativeKeyNumAndMemory struct {
CumulativeKeyNum int64
MinMemoryMB float64
AvgMemoryMB float64
MaxMemoryMB float64
}
// CumulativeKeyNumAndMemorySlice is a slice of CumulativeKeyNumAndMemory.
type CumulativeKeyNumAndMemorySlice []CumulativeKeyNumAndMemory
func (t CumulativeKeyNumAndMemorySlice) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t CumulativeKeyNumAndMemorySlice) Len() int { return len(t) }
func (t CumulativeKeyNumAndMemorySlice) Less(i, j int) bool {
return t[i].CumulativeKeyNum < t[j].CumulativeKeyNum
}
// FindRangesMemory aggregates data points by cumulative throughput, in
// order to map the number of keys to the average memory usage (sorting
// by timestamp is still a TODO; see below).
func FindRangesMemory(data []CumulativeKeyNumAndMemory, unit int64, totalRequests int64) CumulativeKeyNumAndMemorySlice {
// TODO: sort by timestamps, because we want the 'cumulative' trends as
// we write more keys; unlike 'report.TimeSeries', the input slice here
// carries no timestamps yet.
cumulKeyN := int64(0)
maxKey := int64(0)
rm := make(map[int64]CumulativeKeyNumAndMemory)
// this data is aggregated by second, and we want to map the number of
// keys to memory usage; each map key is a cumulative key-count boundary,
// and its value is the memory usage observed at that point
for _, ts := range data {
cumulKeyN += ts.CumulativeKeyNum
if cumulKeyN < unit {
// not enough data points yet
continue
}
mem := ts
// cumulKeyN >= unit
for cumulKeyN > maxKey {
maxKey += unit
rm[maxKey] = mem
}
}
// fill-in empty rows
for i := maxKey; i < totalRequests; i += unit {
if _, ok := rm[i]; !ok {
rm[i] = CumulativeKeyNumAndMemory{}
}
}
if _, ok := rm[totalRequests]; !ok {
rm[totalRequests] = CumulativeKeyNumAndMemory{}
}
kss := []CumulativeKeyNumAndMemory{}
delete(rm, 0) // drop data at beginning
for k, v := range rm {
// make sure to use 'k' as CumulativeKeyNum
kss = append(kss, CumulativeKeyNumAndMemory{
CumulativeKeyNum: k,
MinMemoryMB: v.MinMemoryMB,
AvgMemoryMB: v.AvgMemoryMB,
MaxMemoryMB: v.MaxMemoryMB,
})
}
// sort by cumulative throughput (number of keys)
// in ascending order
sort.Sort(CumulativeKeyNumAndMemorySlice(kss))
return kss
}
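For completeness, a matching sketch for the memory path (illustrative only, not part of this commit; it uses only types defined in find_ranges.go above):

func exampleFindRangesMemory() CumulativeKeyNumAndMemorySlice {
	var data []CumulativeKeyNumAndMemory
	for i := int64(0); i < 10; i++ {
		data = append(data, CumulativeKeyNumAndMemory{
			CumulativeKeyNum: 50,             // 50 keys added per sample
			AvgMemoryMB:      float64(i + 1), // 1 MB, 2 MB, ...
		})
	}
	// bucket into ranges of 20 keys, zero-padded up to 555 total requests
	return FindRangesMemory(data, 20, 555)
}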


@@ -1,97 +0,0 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbtester
import (
"sort"
"time"
"github.com/coreos/dbtester/pkg/report"
)
// findRangesLatency sorts all data points by its timestamp.
// And then aggregate by the cumulative throughput,
// in order to map the number of keys to the average latency.
//
// type DataPoint struct {
// Timestamp int64
// MinLatency time.Duration
// AvgLatency time.Duration
// MaxLatency time.Duration
// ThroughPut int64
// }
//
// If unit is 1000 and the average throughput per second is 30,000
// and its average latency is 10ms, it will have 30 data points with
// latency 10ms.
func findRangesLatency(tss report.TimeSeries, unit int64, totalRequests int64) keyNumToAvgLatencys {
sort.Sort(tss)
cumulKeyN := int64(0)
maxKey := int64(0)
rm := make(map[int64]keyNumToAvgLatency)
// this data is aggregated by second
// and we want to map number of keys to latency
// so the range is the key
// and the value is the cumulative throughput
for _, ts := range tss {
cumulKeyN += ts.ThroughPut
if cumulKeyN < unit {
// not enough data points yet
continue
}
// cumulKeyN >= unit
for cumulKeyN > maxKey {
maxKey += unit
rm[maxKey] = keyNumToAvgLatency{minLat: ts.MinLatency, avgLat: ts.AvgLatency, maxLat: ts.MaxLatency}
}
}
// fill-in empty rows
for i := maxKey; i < totalRequests; i += unit {
if _, ok := rm[i]; !ok {
rm[i] = keyNumToAvgLatency{}
}
}
if _, ok := rm[totalRequests]; !ok {
rm[totalRequests] = keyNumToAvgLatency{}
}
kss := []keyNumToAvgLatency{}
delete(rm, 0)
for k, v := range rm {
// make sure to use 'k' as keyNum
kss = append(kss, keyNumToAvgLatency{keyNum: k, minLat: v.minLat, avgLat: v.avgLat, maxLat: v.maxLat})
}
sort.Sort(keyNumToAvgLatencys(kss))
return kss
}
type keyNumToAvgLatency struct {
keyNum int64
minLat time.Duration
avgLat time.Duration
maxLat time.Duration
}
type keyNumToAvgLatencys []keyNumToAvgLatency
func (t keyNumToAvgLatencys) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t keyNumToAvgLatencys) Len() int { return len(t) }
func (t keyNumToAvgLatencys) Less(i, j int) bool { return t[i].keyNum < t[j].keyNum }


@@ -1,75 +0,0 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbtester
import (
"reflect"
"testing"
"time"
"github.com/coreos/dbtester/pkg/report"
)
func Test_findRangesLatency(t *testing.T) {
var tslice report.TimeSeries
for i := int64(0); i < 10; i++ {
dp := report.DataPoint{
Timestamp: i,
AvgLatency: time.Duration(i + 1),
ThroughPut: 50,
}
tslice = append(tslice, dp)
}
pss := findRangesLatency(tslice, 20, 555)
expected := []keyNumToAvgLatency{
{keyNum: 20, avgLat: 1},
{keyNum: 40, avgLat: 1},
{keyNum: 60, avgLat: 1},
{keyNum: 80, avgLat: 2},
{keyNum: 100, avgLat: 2},
{keyNum: 120, avgLat: 3},
{keyNum: 140, avgLat: 3},
{keyNum: 160, avgLat: 3},
{keyNum: 180, avgLat: 4},
{keyNum: 200, avgLat: 4},
{keyNum: 220, avgLat: 5},
{keyNum: 240, avgLat: 5},
{keyNum: 260, avgLat: 5},
{keyNum: 280, avgLat: 6},
{keyNum: 300, avgLat: 6},
{keyNum: 320, avgLat: 7},
{keyNum: 340, avgLat: 7},
{keyNum: 360, avgLat: 7},
{keyNum: 380, avgLat: 8},
{keyNum: 400, avgLat: 8},
{keyNum: 420, avgLat: 9},
{keyNum: 440, avgLat: 9},
{keyNum: 460, avgLat: 9},
{keyNum: 480, avgLat: 10},
{keyNum: 500, avgLat: 10},
{keyNum: 520, avgLat: 0},
{keyNum: 540, avgLat: 0},
{keyNum: 555, avgLat: 0},
}
if len(pss) != len(expected) {
t.Fatalf("expected %+v, got %+v", expected, pss)
}
for i, elem := range pss {
if !reflect.DeepEqual(elem, expected[i]) {
t.Fatalf("#%d: processed data point expected %+v, got %+v", i, expected[i], elem)
}
}
}

find_ranges_test.go (new file, 126 lines)

@@ -0,0 +1,126 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbtester
import (
"reflect"
"testing"
"time"
"github.com/coreos/dbtester/pkg/report"
)
func TestFindRangesMemory(t *testing.T) {
var data []CumulativeKeyNumAndMemory
for i := int64(0); i < 10; i++ {
dp := CumulativeKeyNumAndMemory{
CumulativeKeyNum: 50,
AvgMemoryMB: float64(i + 1),
}
data = append(data, dp)
}
pss := FindRangesMemory(data, 20, 555)
expected := []CumulativeKeyNumAndMemory{
{CumulativeKeyNum: 20, AvgMemoryMB: 1},
{CumulativeKeyNum: 40, AvgMemoryMB: 1},
{CumulativeKeyNum: 60, AvgMemoryMB: 1},
{CumulativeKeyNum: 80, AvgMemoryMB: 2},
{CumulativeKeyNum: 100, AvgMemoryMB: 2},
{CumulativeKeyNum: 120, AvgMemoryMB: 3},
{CumulativeKeyNum: 140, AvgMemoryMB: 3},
{CumulativeKeyNum: 160, AvgMemoryMB: 3},
{CumulativeKeyNum: 180, AvgMemoryMB: 4},
{CumulativeKeyNum: 200, AvgMemoryMB: 4},
{CumulativeKeyNum: 220, AvgMemoryMB: 5},
{CumulativeKeyNum: 240, AvgMemoryMB: 5},
{CumulativeKeyNum: 260, AvgMemoryMB: 5},
{CumulativeKeyNum: 280, AvgMemoryMB: 6},
{CumulativeKeyNum: 300, AvgMemoryMB: 6},
{CumulativeKeyNum: 320, AvgMemoryMB: 7},
{CumulativeKeyNum: 340, AvgMemoryMB: 7},
{CumulativeKeyNum: 360, AvgMemoryMB: 7},
{CumulativeKeyNum: 380, AvgMemoryMB: 8},
{CumulativeKeyNum: 400, AvgMemoryMB: 8},
{CumulativeKeyNum: 420, AvgMemoryMB: 9},
{CumulativeKeyNum: 440, AvgMemoryMB: 9},
{CumulativeKeyNum: 460, AvgMemoryMB: 9},
{CumulativeKeyNum: 480, AvgMemoryMB: 10},
{CumulativeKeyNum: 500, AvgMemoryMB: 10},
{CumulativeKeyNum: 520, AvgMemoryMB: 0},
{CumulativeKeyNum: 540, AvgMemoryMB: 0},
{CumulativeKeyNum: 555, AvgMemoryMB: 0},
}
if len(pss) != len(expected) {
t.Fatalf("expected %+v, got %+v", expected, pss)
}
for i, elem := range pss {
if !reflect.DeepEqual(elem, expected[i]) {
t.Fatalf("#%d: processed data point expected %+v, got %+v", i, expected[i], elem)
}
}
}
func TestFindRangesLatency(t *testing.T) {
var data report.TimeSeries
for i := int64(0); i < 10; i++ {
dp := report.DataPoint{
Timestamp: i,
AvgLatency: time.Duration(i + 1),
ThroughPut: 50,
}
data = append(data, dp)
}
pss := FindRangesLatency(data, 20, 555)
expected := []CumulativeKeyNumToAvgLatency{
{CumulativeKeyNum: 20, AvgLatency: 1},
{CumulativeKeyNum: 40, AvgLatency: 1},
{CumulativeKeyNum: 60, AvgLatency: 1},
{CumulativeKeyNum: 80, AvgLatency: 2},
{CumulativeKeyNum: 100, AvgLatency: 2},
{CumulativeKeyNum: 120, AvgLatency: 3},
{CumulativeKeyNum: 140, AvgLatency: 3},
{CumulativeKeyNum: 160, AvgLatency: 3},
{CumulativeKeyNum: 180, AvgLatency: 4},
{CumulativeKeyNum: 200, AvgLatency: 4},
{CumulativeKeyNum: 220, AvgLatency: 5},
{CumulativeKeyNum: 240, AvgLatency: 5},
{CumulativeKeyNum: 260, AvgLatency: 5},
{CumulativeKeyNum: 280, AvgLatency: 6},
{CumulativeKeyNum: 300, AvgLatency: 6},
{CumulativeKeyNum: 320, AvgLatency: 7},
{CumulativeKeyNum: 340, AvgLatency: 7},
{CumulativeKeyNum: 360, AvgLatency: 7},
{CumulativeKeyNum: 380, AvgLatency: 8},
{CumulativeKeyNum: 400, AvgLatency: 8},
{CumulativeKeyNum: 420, AvgLatency: 9},
{CumulativeKeyNum: 440, AvgLatency: 9},
{CumulativeKeyNum: 460, AvgLatency: 9},
{CumulativeKeyNum: 480, AvgLatency: 10},
{CumulativeKeyNum: 500, AvgLatency: 10},
{CumulativeKeyNum: 520, AvgLatency: 0},
{CumulativeKeyNum: 540, AvgLatency: 0},
{CumulativeKeyNum: 555, AvgLatency: 0},
}
if len(pss) != len(expected) {
t.Fatalf("expected %+v, got %+v", expected, pss)
}
for i, elem := range pss {
if !reflect.DeepEqual(elem, expected[i]) {
t.Fatalf("#%d: processed data point expected %+v, got %+v", i, expected[i], elem)
}
}
}
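Why the tests expect these values: each synthetic sample adds 50 keys while the bucket unit is 20, so the inner loop advances maxKey past the cumulative count in steps of 20; the first sample fills buckets 20, 40, and 60, the second fills 80 and 100, and so on in an alternating 3-2 pattern up to 500. The fill-in loop then pads empty rows at 520 and 540, plus the final row at totalRequests (555), which is why the tail entries read zero.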


@@ -258,16 +258,16 @@ func (cfg *Config) saveDataLatencyThroughputTimeseries(gcfg TestGroup, st report
}
// aggregate latency by the number of keys
tss := findRangesLatency(st.TimeSeries, 1000, gcfg.RequestNumber)
tss := FindRangesLatency(st.TimeSeries, 1000, gcfg.RequestNumber)
ctt1 := dataframe.NewColumn("KEYS")
ctt2 := dataframe.NewColumn("MIN-LATENCY-MS")
ctt3 := dataframe.NewColumn("AVG-LATENCY-MS")
ctt4 := dataframe.NewColumn("MAX-LATENCY-MS")
for i := range tss {
ctt1.PushBack(dataframe.NewStringValue(tss[i].keyNum))
ctt2.PushBack(dataframe.NewStringValue(fmt.Sprintf("%f", toMillisecond(tss[i].minLat))))
ctt3.PushBack(dataframe.NewStringValue(fmt.Sprintf("%f", toMillisecond(tss[i].avgLat))))
ctt4.PushBack(dataframe.NewStringValue(fmt.Sprintf("%f", toMillisecond(tss[i].maxLat))))
ctt1.PushBack(dataframe.NewStringValue(tss[i].CumulativeKeyNum))
ctt2.PushBack(dataframe.NewStringValue(fmt.Sprintf("%f", toMillisecond(tss[i].MinLatency))))
ctt3.PushBack(dataframe.NewStringValue(fmt.Sprintf("%f", toMillisecond(tss[i].AvgLatency))))
ctt4.PushBack(dataframe.NewStringValue(fmt.Sprintf("%f", toMillisecond(tss[i].MaxLatency))))
}
frr := dataframe.New()
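The toMillisecond helper used above is defined elsewhere in the package and is not part of this diff; a plausible sketch of its behavior, matching the %f formatting (an assumption, not the confirmed definition):

// hypothetical sketch of toMillisecond; the real definition lives
// elsewhere in the dbtester package and is not shown in this diff
func toMillisecond(d time.Duration) float64 {
	return float64(d) / float64(time.Millisecond)
}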