mirror of https://github.com/etcd-io/dbtester.git
vendor: revendor 'gyuho/dataframe', 'gyuho/psn'
parent 447fc2e50b
commit caf060d974
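For context, the revendored gyuho/dataframe drops the Get/To prefixes from its accessors (GetColumn -> Column, GetColumns -> Columns, GetHeader -> Header, RowNumber -> CountRow, GetValue -> Value, SetValue -> Set, FindValue -> FindFirst, KeepRows -> Keep, ToCSV -> CSV, ToString -> String, ToNumber -> Number, and so on), which is what most of the call-site churn in the hunks below reflects. The following is a minimal sketch of a caller written against the new API; the CSV file name and the column header are illustrative only and not part of this commit.

package main

import (
	"fmt"
	"log"

	"github.com/gyuho/dataframe"
)

func main() {
	// NewFromCSV is unchanged by the revendor.
	fr, err := dataframe.NewFromCSV(nil, "bench-result.csv") // illustrative path
	if err != nil {
		log.Fatal(err)
	}

	// Old API: fr.GetColumn, col.RowNumber, col.GetValue, v.ToNumber
	// New API: fr.Column, col.CountRow, col.Value, v.Number
	col, err := fr.Column("AVG-LATENCY-MS") // illustrative header
	if err != nil {
		log.Fatal(err)
	}

	var sum float64
	n := col.CountRow()
	for i := 0; i < n; i++ {
		v, verr := col.Value(i)
		if verr != nil {
			log.Fatal(verr)
		}
		f, _ := v.Number() // second return is false when the cell is not numeric
		sum += f
	}
	if n > 0 {
		fmt.Printf("mean %s over %d rows: %.2f\n", col.Header(), n, sum/float64(n))
	}
}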
@ -1,527 +0,0 @@
|
|||
// Copyright 2017 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package analyze
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gonum/plot"
|
||||
"github.com/gonum/plot/plotter"
|
||||
"github.com/gonum/plot/plotutil"
|
||||
"github.com/gonum/plot/vg"
|
||||
"github.com/gyuho/dataframe"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func commandFunc(cmd *cobra.Command, args []string) error {
|
||||
cfg, err := ReadConfig(configPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
println()
|
||||
plog.Println("Step 1: aggregating all system metrics CSV files from each database")
|
||||
for step1Idx, elem := range cfg.Step1 {
|
||||
var (
|
||||
frames = []dataframe.Frame{}
|
||||
maxCommonMinUnixTime int64
|
||||
maxCommonMaxUnixTime int64
|
||||
)
|
||||
for i, monitorPath := range elem.DataPathList {
|
||||
plog.Printf("Step 1-%d-%d: creating dataframe from %s", step1Idx, i, monitorPath)
|
||||
originalFrame, err := dataframe.NewFromCSV(nil, monitorPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newFrame := dataframe.New()
|
||||
var tsc dataframe.Column
|
||||
for _, name := range columnsToAggregate {
|
||||
cmn, err := originalFrame.GetColumn(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if name == "UNIX-TS" {
|
||||
tsc = cmn
|
||||
}
|
||||
if err = newFrame.AddColumn(cmn); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
frames = append(frames, newFrame)
|
||||
|
||||
fv, ok := tsc.FrontNonNil()
|
||||
if !ok {
|
||||
return fmt.Errorf("FrontNonNil %s has empty Unix time %v", monitorPath, fv)
|
||||
}
|
||||
fs, ok := fv.ToString()
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot ToString %v", fv)
|
||||
}
|
||||
fd, err := strconv.ParseInt(fs, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bv, ok := tsc.BackNonNil()
|
||||
if !ok {
|
||||
return fmt.Errorf("BackNonNil %s has empty Unix time %v", monitorPath, fv)
|
||||
}
|
||||
bs, ok := bv.ToString()
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot ToString %v", bv)
|
||||
}
|
||||
bd, err := strconv.ParseInt(bs, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if i == 0 {
|
||||
maxCommonMinUnixTime = fd
|
||||
maxCommonMaxUnixTime = bd
|
||||
}
|
||||
if maxCommonMinUnixTime < fd {
|
||||
maxCommonMinUnixTime = fd
|
||||
}
|
||||
if maxCommonMaxUnixTime > bd {
|
||||
maxCommonMaxUnixTime = bd
|
||||
}
|
||||
}
|
||||
|
||||
// monitor CSVs come from multiple servers, and we want them all to have an equal number of rows
|
||||
// Truncate all rows before maxCommonMinUnixTime and after maxCommonMaxUnixTime
|
||||
minTS := fmt.Sprintf("%d", maxCommonMinUnixTime)
|
||||
maxTS := fmt.Sprintf("%d", maxCommonMaxUnixTime)
|
||||
aggregatedFrame := dataframe.New()
|
||||
for i := range frames {
|
||||
uc, err := frames[i].GetColumn("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
j, ok := uc.FindValue(dataframe.NewStringValue(minTS))
|
||||
if !ok {
|
||||
return fmt.Errorf("%v does not exist in %s", minTS, elem.DataPathList[i])
|
||||
}
|
||||
k, ok := uc.FindValue(dataframe.NewStringValue(maxTS))
|
||||
if !ok {
|
||||
return fmt.Errorf("%v does not exist in %s", maxTS, elem.DataPathList[i])
|
||||
}
|
||||
|
||||
for _, header := range frames[i].GetHeader() {
|
||||
if i > 0 && header == "UNIX-TS" {
|
||||
continue
|
||||
}
|
||||
var col dataframe.Column
|
||||
col, err = frames[i].GetColumn(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = col.KeepRows(j, k+1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// update column name with database name and its index
|
||||
// all in one aggregated CSV file
|
||||
if header != "UNIX-TS" {
|
||||
switch header {
|
||||
case "CPU-NUM":
|
||||
header = "CPU"
|
||||
case "VMRSS-NUM":
|
||||
header = "VMRSS-MB"
|
||||
|
||||
// convert bytes to MB
|
||||
colN := col.RowNumber()
|
||||
for rowIdx := 0; rowIdx < colN; rowIdx++ {
|
||||
var rowV dataframe.Value
|
||||
rowV, err = col.GetValue(rowIdx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fv, _ := rowV.ToNumber()
|
||||
frv := float64(fv) * 0.000001
|
||||
if err = col.SetValue(rowIdx, dataframe.NewStringValue(fmt.Sprintf("%.2f", frv))); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case "EXTRA":
|
||||
header = "CLIENT-NUM"
|
||||
}
|
||||
|
||||
col.UpdateHeader(fmt.Sprintf("%s-%d", header, i+1))
|
||||
}
|
||||
|
||||
if err = aggregatedFrame.AddColumn(col); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
plog.Printf("Step 1-%d-%d: creating dataframe from %s", step1Idx, len(elem.DataPathList), elem.DataBenchmarkPath)
|
||||
colMonitorUnixTs, err := aggregatedFrame.GetColumn("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// need to combine aggregatedFrame with benchResultFrame by Unix timestamps
|
||||
benchResultFrame, err := dataframe.NewFromCSV(nil, elem.DataBenchmarkPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
colBenchUnixTs, err := benchResultFrame.GetColumn("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fv, ok := colBenchUnixTs.FrontNonNil()
|
||||
if !ok {
|
||||
return fmt.Errorf("FrontNonNil %s has empty Unix time %v", elem.DataBenchmarkPath, fv)
|
||||
}
|
||||
startRowMonitor, ok := colMonitorUnixTs.FindValue(fv)
|
||||
if !ok {
|
||||
return fmt.Errorf("%v is not found in monitor results %q", fv, elem.DataPathList)
|
||||
}
|
||||
bv, ok := colBenchUnixTs.BackNonNil()
|
||||
if !ok {
|
||||
return fmt.Errorf("BackNonNil %s has empty Unix time %v", elem.DataBenchmarkPath, bv)
|
||||
}
|
||||
endRowMonitor, ok := colMonitorUnixTs.FindValue(bv)
|
||||
if !ok { // monitor short of rows
|
||||
endRowMonitor = colMonitorUnixTs.RowNumber() - 1
|
||||
}
|
||||
|
||||
var benchLastIdx int
|
||||
for _, col := range benchResultFrame.GetColumns() {
|
||||
if benchLastIdx == 0 {
|
||||
benchLastIdx = col.RowNumber()
|
||||
}
|
||||
if benchLastIdx > col.RowNumber() {
|
||||
benchLastIdx = col.RowNumber()
|
||||
}
|
||||
}
|
||||
benchLastIdx--
|
||||
|
||||
if benchLastIdx+1 < endRowMonitor-startRowMonitor+1 { // benchmark is short of rows
|
||||
endRowMonitor = startRowMonitor + benchLastIdx
|
||||
} else { // monitor is short of rows
|
||||
benchLastIdx = endRowMonitor - startRowMonitor
|
||||
}
|
||||
|
||||
for _, hd := range aggregatedFrame.GetHeader() {
|
||||
if hd == "UNIX-TS" {
|
||||
continue
|
||||
}
|
||||
var col dataframe.Column
|
||||
col, err = aggregatedFrame.GetColumn(hd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = col.KeepRows(startRowMonitor, endRowMonitor); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = benchResultFrame.AddColumn(col); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
plog.Printf("Step 1-%d-%d: calculating average, cumulative values", step1Idx, len(elem.DataPathList)+1)
|
||||
var (
|
||||
sampleSize = float64(len(elem.DataPathList))
|
||||
cumulativeThroughputCol = dataframe.NewColumn("CUMULATIVE-AVG-THROUGHPUT")
|
||||
totalThroughput int
|
||||
avgCPUCol = dataframe.NewColumn("AVG-CPU")
|
||||
avgVMRSSMBCol = dataframe.NewColumn("AVG-VMRSS-MB")
|
||||
|
||||
// TODO: average value of disk stats, network stats
|
||||
)
|
||||
for i := 0; i < benchLastIdx; i++ {
|
||||
var (
|
||||
cpuTotal float64
|
||||
memoryTotal float64
|
||||
)
|
||||
for _, col := range benchResultFrame.GetColumns() {
|
||||
var rv dataframe.Value
|
||||
rv, err = col.GetValue(i)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fv, _ := rv.ToNumber()
|
||||
switch {
|
||||
case strings.HasPrefix(col.GetHeader(), "CPU-"):
|
||||
cpuTotal += fv
|
||||
|
||||
case strings.HasPrefix(col.GetHeader(), "VMRSS-"):
|
||||
memoryTotal += fv
|
||||
|
||||
case col.GetHeader() == "AVG-THROUGHPUT":
|
||||
fv, _ := rv.ToNumber()
|
||||
totalThroughput += int(fv)
|
||||
cumulativeThroughputCol.PushBack(dataframe.NewStringValue(totalThroughput))
|
||||
}
|
||||
}
|
||||
avgCPUCol.PushBack(dataframe.NewStringValue(fmt.Sprintf("%.2f", cpuTotal/sampleSize)))
|
||||
avgVMRSSMBCol.PushBack(dataframe.NewStringValue(fmt.Sprintf("%.2f", memoryTotal/sampleSize)))
|
||||
}
|
||||
|
||||
plog.Printf("Step 1-%d-%d: combine %s and %q", step1Idx, len(elem.DataPathList)+2, elem.DataBenchmarkPath, elem.DataPathList)
|
||||
unixTsCol, err := benchResultFrame.GetColumn("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
latencyCol, err := benchResultFrame.GetColumn("AVG-LATENCY-MS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
throughputCol, err := benchResultFrame.GetColumn("AVG-THROUGHPUT")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
aggFr := dataframe.New()
|
||||
aggFr.AddColumn(unixTsCol)
|
||||
aggFr.AddColumn(latencyCol)
|
||||
aggFr.AddColumn(throughputCol)
|
||||
aggFr.AddColumn(cumulativeThroughputCol)
|
||||
for _, hd := range benchResultFrame.GetHeader() {
|
||||
col, err := benchResultFrame.GetColumn(hd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
aggFr.AddColumn(col)
|
||||
case strings.HasPrefix(hd, "VMRSS-"):
|
||||
aggFr.AddColumn(col)
|
||||
}
|
||||
}
|
||||
aggFr.AddColumn(avgCPUCol)
|
||||
aggFr.AddColumn(avgVMRSSMBCol)
|
||||
|
||||
plog.Printf("Step 1-%d-%d: saving to %s", step1Idx, len(elem.DataPathList)+3, elem.OutputPath)
|
||||
if err := aggFr.ToCSV(elem.OutputPath); err != nil {
|
||||
return err
|
||||
}
|
||||
println()
|
||||
}
|
||||
|
||||
println()
|
||||
plog.Println("Step 2: aggregating aggregates...")
|
||||
for step2Idx, elem := range cfg.Step2 {
|
||||
var (
|
||||
frames = []dataframe.Frame{}
|
||||
maxSize int
|
||||
)
|
||||
for i, data := range elem.DataList {
|
||||
plog.Printf("Step 2-%d-%d: creating dataframe from %s...", step2Idx, i, data.Path)
|
||||
fr, err := dataframe.NewFromCSV(nil, data.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
frames = append(frames, fr)
|
||||
|
||||
col, err := fr.GetColumn("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rNum := col.RowNumber()
|
||||
if maxSize < rNum {
|
||||
maxSize = rNum
|
||||
}
|
||||
}
|
||||
|
||||
nf := dataframe.New()
|
||||
secondCol := dataframe.NewColumn("SECOND")
|
||||
for i := 0; i < maxSize; i++ {
|
||||
secondCol.PushBack(dataframe.NewStringValue(i))
|
||||
}
|
||||
nf.AddColumn(secondCol)
|
||||
|
||||
// TODO: keep disk, network stats columns
|
||||
colsToKeep := []string{"AVG-LATENCY-MS", "AVG-THROUGHPUT", "CUMULATIVE-AVG-THROUGHPUT", "AVG-CPU", "AVG-VMRSS-MB"}
|
||||
for i, fr := range frames {
|
||||
dbID := elem.DataList[i].Name
|
||||
plog.Printf("Step 2-%d-%d: cleaning up %s...", step2Idx, i, dbID)
|
||||
for _, col := range fr.GetColumns() {
|
||||
toSkip := true
|
||||
for _, cv := range colsToKeep {
|
||||
if col.GetHeader() == cv {
|
||||
toSkip = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if toSkip {
|
||||
continue
|
||||
}
|
||||
if err := col.Appends(dataframe.NewStringValueNil(), maxSize); err != nil {
|
||||
return err
|
||||
}
|
||||
col.UpdateHeader(fmt.Sprintf("%s-%s", col.GetHeader(), dbID))
|
||||
nf.AddColumn(col)
|
||||
}
|
||||
}
|
||||
|
||||
plog.Printf("Step 2-%d: saving to %s...", step2Idx, elem.OutputPath)
|
||||
if err := nf.ToCSV(elem.OutputPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
println()
|
||||
plog.Println("Step 3: plotting...")
|
||||
|
||||
plot.DefaultFont = "Helvetica"
|
||||
plotter.DefaultLineStyle.Width = vg.Points(1.5)
|
||||
plotter.DefaultGlyphStyle.Radius = vg.Points(2.0)
|
||||
var (
|
||||
plotWidth = 12 * vg.Inch
|
||||
plotHeight = 8 * vg.Inch
|
||||
)
|
||||
for step3Idx, elem := range cfg.Step3 {
|
||||
fr, err := dataframe.NewFromCSV(nil, elem.DataPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
plog.Printf("Step 3-%d: %s with %q", step3Idx, elem.DataPath, fr.GetHeader())
|
||||
|
||||
for i, pelem := range elem.PlotList {
|
||||
plog.Printf("Step 3-%d-%d: %s at %q", step3Idx, i, pelem.YAxis, pelem.OutputPathList)
|
||||
pl, err := plot.New()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pl.Title.Text = fmt.Sprintf("%s, %s", cfg.Titles[step3Idx], pelem.YAxis)
|
||||
pl.X.Label.Text = pelem.XAxis
|
||||
pl.Y.Label.Text = pelem.YAxis
|
||||
pl.Legend.Top = true
|
||||
|
||||
// var args []interface{}
|
||||
// for _, line := range pelem.Lines {
|
||||
// col, err := fr.GetColumn(line.Column)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// pt, err := points(col)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// args = append(args, line.Legend, pt)
|
||||
// }
|
||||
// if err = plotutil.AddLines(pl, args...); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
var ps []plot.Plotter
|
||||
for j, line := range pelem.Lines {
|
||||
col, err := fr.GetColumn(line.Column)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pt, err := points(col)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l, err := plotter.NewLine(pt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Color = getRGB(line.Legend, j)
|
||||
l.Dashes = plotutil.Dashes(j)
|
||||
ps = append(ps, l)
|
||||
|
||||
pl.Legend.Add(line.Legend, l)
|
||||
}
|
||||
pl.Add(ps...)
|
||||
|
||||
for _, outputPath := range pelem.OutputPathList {
|
||||
if err = pl.Save(plotWidth, plotHeight, outputPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
println()
|
||||
plog.Println("Step 4: writing README...")
|
||||
rdBuf := new(bytes.Buffer)
|
||||
rdBuf.WriteString("\n\n")
|
||||
rdBuf.WriteString(cfg.Step4.Preface)
|
||||
rdBuf.WriteString("\n\n\n")
|
||||
for i, result := range cfg.Step4.Results {
|
||||
rdBuf.WriteString(fmt.Sprintf("<br><br>\n##### %s", cfg.Titles[i]))
|
||||
rdBuf.WriteString("\n\n")
|
||||
for _, img := range result.Images {
|
||||
imgPath := ""
|
||||
switch img.ImageType {
|
||||
case "local":
|
||||
imgPath = "./" + filepath.Base(img.ImagePath)
|
||||
rdBuf.WriteString(fmt.Sprintf("![%s](%s)\n\n", img.ImageTitle, imgPath))
|
||||
case "remote":
|
||||
rdBuf.WriteString(fmt.Sprintf(`<img src="%s" alt="%s">`, img.ImagePath, img.ImageTitle))
|
||||
rdBuf.WriteString("\n\n")
|
||||
default:
|
||||
return fmt.Errorf("%s is not supported", img.ImageType)
|
||||
}
|
||||
}
|
||||
rdBuf.WriteString("\n\n")
|
||||
}
|
||||
if err := toFile(rdBuf.String(), cfg.Step4.OutputPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
println()
|
||||
plog.Println("FINISHED!")
|
||||
return nil
|
||||
}
|
||||
|
||||
func toFile(txt, fpath string) error {
|
||||
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC, 0777)
|
||||
if err != nil {
|
||||
f, err = os.Create(fpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err := f.WriteString(txt); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func points(col dataframe.Column) (plotter.XYs, error) {
|
||||
bv, ok := col.BackNonNil()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("BackNonNil not found")
|
||||
}
|
||||
rowN, ok := col.FindLastValue(bv)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("not found %v", bv)
|
||||
}
|
||||
pts := make(plotter.XYs, rowN)
|
||||
for i := range pts {
|
||||
v, err := col.GetValue(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n, _ := v.ToNumber()
|
||||
pts[i].X = float64(i)
|
||||
pts[i].Y = n
|
||||
}
|
||||
return pts, nil
|
||||
}
|
||||
|
|
@ -22,19 +22,21 @@ import (
|
|||
)
|
||||
|
||||
var sysMetricsColumnsToRead = []string{
|
||||
"UNIX-TS", "CPU-NUM", "VMRSS-NUM",
|
||||
"UNIX-TS",
|
||||
"CPU-NUM",
|
||||
"VMRSS-NUM",
|
||||
"READS-COMPLETED",
|
||||
"READS-COMPLETED-DIFF",
|
||||
"READS-COMPLETED-DELTA",
|
||||
"SECTORS-READ",
|
||||
"SECTORS-READ-DIFF",
|
||||
"SECTORS-READ-DELTA",
|
||||
"WRITES-COMPLETED",
|
||||
"WRITES-COMPLETED-DIFF",
|
||||
"WRITES-COMPLETED-DELTA",
|
||||
"SECTORS-WRITTEN",
|
||||
"SECTORS-WRITTEN-DIFF",
|
||||
"SECTORS-WRITTEN-DELTA",
|
||||
"RECEIVE-BYTES-NUM",
|
||||
"RECEIVE-BYTES-NUM-DIFF",
|
||||
"RECEIVE-BYTES-NUM-DELTA",
|
||||
"TRANSMIT-BYTES-NUM",
|
||||
"TRANSMIT-BYTES-NUM-DIFF",
|
||||
"TRANSMIT-BYTES-NUM-DELTA",
|
||||
"EXTRA",
|
||||
}
|
||||
|
||||
|
|
@ -57,7 +59,7 @@ func readSystemMetrics(fpath string) (data testData, err error) {
|
|||
var unixTSColumn dataframe.Column
|
||||
for _, name := range sysMetricsColumnsToRead {
|
||||
var column dataframe.Column
|
||||
column, err = originalFrame.GetColumn(name)
|
||||
column, err = originalFrame.Column(name)
|
||||
if err != nil {
|
||||
return testData{}, err
|
||||
}
|
||||
|
|
@ -74,9 +76,9 @@ func readSystemMetrics(fpath string) (data testData, err error) {
|
|||
if !ok {
|
||||
return testData{}, fmt.Errorf("FrontNonNil %s has empty Unix time %v", fpath, fv)
|
||||
}
|
||||
fs, ok := fv.ToString()
|
||||
fs, ok := fv.String()
|
||||
if !ok {
|
||||
return testData{}, fmt.Errorf("cannot ToString %v", fv)
|
||||
return testData{}, fmt.Errorf("cannot String %v", fv)
|
||||
}
|
||||
data.frontUnixTS, err = strconv.ParseInt(fs, 10, 64)
|
||||
if err != nil {
|
||||
|
|
@ -88,9 +90,9 @@ func readSystemMetrics(fpath string) (data testData, err error) {
|
|||
if !ok {
|
||||
return testData{}, fmt.Errorf("BackNonNil %s has empty Unix time %v", fpath, fv)
|
||||
}
|
||||
bs, ok := bv.ToString()
|
||||
bs, ok := bv.String()
|
||||
if !ok {
|
||||
return testData{}, fmt.Errorf("cannot ToString %v", bv)
|
||||
return testData{}, fmt.Errorf("cannot String %v", bv)
|
||||
}
|
||||
data.lastUnixTS, err = strconv.ParseInt(bs, 10, 64)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -69,32 +69,32 @@ func (data *analyzeData) aggSystemMetrics() error {
|
|||
maxTS := fmt.Sprintf("%d", data.maxUnixTS)
|
||||
data.sysAgg = dataframe.New()
|
||||
for i := range data.sys {
|
||||
uc, err := data.sys[i].frame.GetColumn("UNIX-TS")
|
||||
uc, err := data.sys[i].frame.Column("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
minTSIdx, ok := uc.FindValue(dataframe.NewStringValue(minTS))
|
||||
minTSIdx, ok := uc.FindFirst(dataframe.NewStringValue(minTS))
|
||||
if !ok {
|
||||
return fmt.Errorf("%v does not exist in %s", minTS, data.sys[i].filePath)
|
||||
}
|
||||
maxTSIdx, ok := uc.FindValue(dataframe.NewStringValue(maxTS))
|
||||
maxTSIdx, ok := uc.FindFirst(dataframe.NewStringValue(maxTS))
|
||||
if !ok {
|
||||
return fmt.Errorf("%v does not exist in %s", maxTS, data.sys[i].filePath)
|
||||
}
|
||||
|
||||
for _, header := range data.sys[i].frame.GetHeader() {
|
||||
for _, header := range data.sys[i].frame.Headers() {
|
||||
if i > 0 && header == "UNIX-TS" {
|
||||
// skip for other databases; we want to keep just one UNIX-TS column
|
||||
continue
|
||||
}
|
||||
|
||||
var col dataframe.Column
|
||||
col, err = data.sys[i].frame.GetColumn(header)
|
||||
col, err = data.sys[i].frame.Column(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// just keep rows from [min,maxUnixTS]
|
||||
if err = col.KeepRows(minTSIdx, maxTSIdx+1); err != nil {
|
||||
if err = col.Keep(minTSIdx, maxTSIdx+1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -113,16 +113,16 @@ func (data *analyzeData) aggSystemMetrics() error {
|
|||
header = "VMRSS-MB"
|
||||
|
||||
// convert bytes to mb
|
||||
colN := col.RowNumber()
|
||||
colN := col.CountRow()
|
||||
for rowIdx := 0; rowIdx < colN; rowIdx++ {
|
||||
var rowV dataframe.Value
|
||||
rowV, err = col.GetValue(rowIdx)
|
||||
rowV, err = col.Value(rowIdx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fv, _ := rowV.ToNumber()
|
||||
fv, _ := rowV.Number()
|
||||
frv := float64(fv) * 0.000001
|
||||
if err = col.SetValue(rowIdx, dataframe.NewStringValue(fmt.Sprintf("%.2f", frv))); err != nil {
|
||||
if err = col.Set(rowIdx, dataframe.NewStringValue(fmt.Sprintf("%.2f", frv))); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ func (data *analyzeData) importBenchMetrics(fpath string) (err error) {
|
|||
}
|
||||
|
||||
var unixTSColumn dataframe.Column
|
||||
unixTSColumn, err = data.benchMetrics.frame.GetColumn("UNIX-TS")
|
||||
unixTSColumn, err = data.benchMetrics.frame.Column("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -43,9 +43,9 @@ func (data *analyzeData) importBenchMetrics(fpath string) (err error) {
|
|||
if !ok {
|
||||
return fmt.Errorf("FrontNonNil %s has empty Unix time %v", fpath, fv)
|
||||
}
|
||||
fs, ok := fv.ToString()
|
||||
fs, ok := fv.String()
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot ToString %v", fv)
|
||||
return fmt.Errorf("cannot String %v", fv)
|
||||
}
|
||||
data.benchMetrics.frontUnixTS, err = strconv.ParseInt(fs, 10, 64)
|
||||
if err != nil {
|
||||
|
|
@ -57,9 +57,9 @@ func (data *analyzeData) importBenchMetrics(fpath string) (err error) {
|
|||
if !ok {
|
||||
return fmt.Errorf("BackNonNil %s has empty Unix time %v", fpath, fv)
|
||||
}
|
||||
bs, ok := bv.ToString()
|
||||
bs, ok := bv.String()
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot ToString %v", bv)
|
||||
return fmt.Errorf("cannot String %v", bv)
|
||||
}
|
||||
data.benchMetrics.lastUnixTS, err = strconv.ParseInt(bs, 10, 64)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -11,12 +11,12 @@ import (
|
|||
// aggSystemBenchMetrics aggregates all system metrics from 3+ nodes.
|
||||
func (data *analyzeData) aggSystemBenchMetrics() error {
|
||||
plog.Println("STEP #3: aggregating system metrics and benchmark metrics")
|
||||
colSys, err := data.sysAgg.GetColumn("UNIX-TS")
|
||||
colSys, err := data.sysAgg.Column("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
colBench, err := data.benchMetrics.frame.GetColumn("UNIX-TS")
|
||||
colBench, err := data.benchMetrics.frame.Column("UNIX-TS")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -29,23 +29,23 @@ func (data *analyzeData) aggSystemBenchMetrics() error {
|
|||
return fmt.Errorf("BackNonNil %s has empty Unix time %v", data.benchMetrics.filePath, fv)
|
||||
}
|
||||
|
||||
sysStartIdx, ok := colSys.FindValue(fv)
|
||||
sysStartIdx, ok := colSys.FindFirst(fv)
|
||||
if !ok {
|
||||
return fmt.Errorf("%v is not found in system metrics results", fv)
|
||||
}
|
||||
sysEndIdx, ok := colSys.FindValue(bv)
|
||||
sysEndIdx, ok := colSys.FindFirst(bv)
|
||||
if !ok {
|
||||
return fmt.Errorf("%v is not found in system metrics results", fv)
|
||||
}
|
||||
sysRowN := sysEndIdx - sysStartIdx + 1
|
||||
|
||||
var minBenchEndIdx int
|
||||
for _, col := range data.benchMetrics.frame.GetColumns() {
|
||||
for _, col := range data.benchMetrics.frame.Columns() {
|
||||
if minBenchEndIdx == 0 {
|
||||
minBenchEndIdx = col.RowNumber()
|
||||
minBenchEndIdx = col.CountRow()
|
||||
}
|
||||
if minBenchEndIdx > col.RowNumber() {
|
||||
minBenchEndIdx = col.RowNumber()
|
||||
if minBenchEndIdx > col.CountRow() {
|
||||
minBenchEndIdx = col.CountRow()
|
||||
}
|
||||
}
|
||||
minBenchEndIdx--
|
||||
|
|
@ -70,9 +70,9 @@ func (data *analyzeData) aggSystemBenchMetrics() error {
|
|||
|
||||
// first, add bench metrics data
|
||||
// UNIX-TS, AVG-LATENCY-MS, AVG-THROUGHPUT
|
||||
for _, col := range data.benchMetrics.frame.GetColumns() {
|
||||
for _, col := range data.benchMetrics.frame.Columns() {
|
||||
// ALWAYS KEEP FROM FIRST ROW OF BENCHMARKS
|
||||
if err = col.KeepRows(0, minBenchEndIdx); err != nil {
|
||||
if err = col.Keep(0, minBenchEndIdx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = data.sysBenchAgg.AddColumn(col); err != nil {
|
||||
|
|
@ -80,11 +80,11 @@ func (data *analyzeData) aggSystemBenchMetrics() error {
|
|||
}
|
||||
}
|
||||
|
||||
for _, col := range data.sysAgg.GetColumns() {
|
||||
if col.GetHeader() == "UNIX-TS" {
|
||||
for _, col := range data.sysAgg.Columns() {
|
||||
if col.Header() == "UNIX-TS" {
|
||||
continue
|
||||
}
|
||||
if err = col.KeepRows(sysStartIdx, sysEndIdx); err != nil {
|
||||
if err = col.Keep(sysStartIdx, sysEndIdx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = data.sysBenchAgg.AddColumn(col); err != nil {
|
||||
|
|
@ -98,32 +98,47 @@ func (data *analyzeData) aggSystemBenchMetrics() error {
|
|||
cumulativeThroughputCol = dataframe.NewColumn("CUMULATIVE-THROUGHPUT")
|
||||
|
||||
systemMetricsSize = float64(len(data.sys))
|
||||
avgCPUCol = dataframe.NewColumn("AVG-CPU")
|
||||
avgVMRSSMBCol = dataframe.NewColumn("AVG-VMRSS-MB")
|
||||
|
||||
avgCPUCol = dataframe.NewColumn("AVG-CPU")
|
||||
avgVMRSSMBCol = dataframe.NewColumn("AVG-VMRSS-MB")
|
||||
)
|
||||
// iterate horizontally across all the columns
|
||||
|
||||
// compute average value of 3+ nodes
|
||||
// by iterating each row (horizontally) for all the columns
|
||||
for rowIdx := 0; rowIdx < minBenchEndIdx; rowIdx++ {
|
||||
var (
|
||||
cpuSum float64
|
||||
vmrssMBSum float64
|
||||
)
|
||||
for _, col := range data.sysBenchAgg.GetColumns() {
|
||||
rv, err := col.GetValue(rowIdx)
|
||||
for _, col := range data.sysBenchAgg.Columns() {
|
||||
rv, err := col.Value(rowIdx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
vv, _ := rv.ToNumber()
|
||||
vv, _ := rv.Number()
|
||||
|
||||
hd := col.Header()
|
||||
switch {
|
||||
case col.GetHeader() == "AVG-THROUGHPUT":
|
||||
// cumulative values
|
||||
case hd == "AVG-THROUGHPUT":
|
||||
requestSum += int(vv)
|
||||
cumulativeThroughputCol.PushBack(dataframe.NewStringValue(requestSum))
|
||||
|
||||
case strings.HasPrefix(col.GetHeader(), "CPU-"):
|
||||
// average values (need the sum first!)
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
// CPU-NUM was converted to CPU-1, CPU-2, CPU-3
|
||||
cpuSum += vv
|
||||
|
||||
case strings.HasPrefix(col.GetHeader(), "VMRSS-MB-"):
|
||||
case strings.HasPrefix(hd, "VMRSS-MB-"):
|
||||
// VMRSS-NUM was converted to VMRSS-MB-1, VMRSS-MB-2, VMRSS-MB-3
|
||||
vmrssMBSum += vv
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
case strings.HasPrefix(hd, "CPU-"):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -28,3 +28,8 @@ var configPath string
|
|||
func init() {
|
||||
Command.PersistentFlags().StringVarP(&configPath, "config", "c", "", "YAML configuration file path.")
|
||||
}
|
||||
|
||||
func commandFunc(cmd *cobra.Command, args []string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
hash: 06fcd602dbcf0dc12fd38949f48631affe37b90d830b7f670f4ae393ca833053
|
||||
updated: 2017-01-10T16:07:51.437343437-08:00
|
||||
hash: 8bc786fe943af445dd2a394c1bfe63cc4e7898621cda24357f5ea96a8e84432b
|
||||
updated: 2017-01-11T16:26:15.754489142-08:00
|
||||
imports:
|
||||
- name: bitbucket.org/zombiezen/gopdf
|
||||
version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
|
||||
|
|
@ -25,7 +25,7 @@ imports:
|
|||
- name: github.com/cheggaaa/pb
|
||||
version: 6e9d17711bb763b26b68b3931d47f24c1323abab
|
||||
- name: github.com/coreos/etcd
|
||||
version: f0fa5ec507b9e5fa39612627829189b2984f00bd
|
||||
version: 0df543dbb38c9b4f5bd77868c30aa5520c8089a4
|
||||
subpackages:
|
||||
- auth/authpb
|
||||
- client
|
||||
|
|
@ -94,9 +94,9 @@ imports:
|
|||
- runtime/internal
|
||||
- utilities
|
||||
- name: github.com/gyuho/dataframe
|
||||
version: 573cd728a011e5473510a6a1df0f39023c305e04
|
||||
version: 626846d3e46a45817a2054f5bd5704ac2abf5f6e
|
||||
- name: github.com/gyuho/psn
|
||||
version: 34f04cf9e967ef7afb040e55f0f6cc641c8974ba
|
||||
version: eb796de4d35872441498b7cd63d1a95246776163
|
||||
subpackages:
|
||||
- process
|
||||
- schema
|
||||
|
|
@ -150,7 +150,7 @@ imports:
|
|||
- name: github.com/spf13/pflag
|
||||
version: 5ccb023bc27df288a957c5e994cd44fd19619465
|
||||
- name: github.com/ugorji/go
|
||||
version: 9c7f9b7a2bc3a520f7c7b30b34b7f85f47fe27b6
|
||||
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
|
||||
subpackages:
|
||||
- codec
|
||||
- name: golang.org/x/image
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ import:
|
|||
- package: github.com/cheggaaa/pb
|
||||
version: 6e9d17711bb763b26b68b3931d47f24c1323abab
|
||||
- package: github.com/coreos/etcd
|
||||
version: f0fa5ec507b9e5fa39612627829189b2984f00bd
|
||||
version: 0df543dbb38c9b4f5bd77868c30aa5520c8089a4
|
||||
subpackages:
|
||||
- auth/authpb
|
||||
- client
|
||||
|
|
@ -51,9 +51,9 @@ import:
|
|||
- vg/vgpdf
|
||||
- vg/vgsvg
|
||||
- package: github.com/gyuho/dataframe
|
||||
version: 573cd728a011e5473510a6a1df0f39023c305e04
|
||||
version: 626846d3e46a45817a2054f5bd5704ac2abf5f6e
|
||||
- package: github.com/gyuho/psn
|
||||
version: 34f04cf9e967ef7afb040e55f0f6cc641c8974ba
|
||||
version: eb796de4d35872441498b7cd63d1a95246776163
|
||||
subpackages:
|
||||
- process
|
||||
- package: github.com/hashicorp/consul
|
||||
|
|
|
|||
|
|
@ -1,21 +1,202 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Gyu-Ho Lee
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
1. Definitions.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
|
|||
|
|
@ -8,29 +8,29 @@ import (
|
|||
|
||||
// Column represents column-based data.
|
||||
type Column interface {
|
||||
// RowNumber returns the number of rows of the Column.
|
||||
RowNumber() int
|
||||
// CountRow returns the number of rows of the Column.
|
||||
CountRow() int
|
||||
|
||||
// GetHeader returns the header of the Column.
|
||||
GetHeader() string
|
||||
// Header returns the header of the Column.
|
||||
Header() string
|
||||
|
||||
// UpdateHeader updates the header of the Column.
|
||||
UpdateHeader(header string)
|
||||
|
||||
// GetValue returns the Value in the row. It returns error if the row
|
||||
// Value returns the Value in the row. It returns error if the row
|
||||
// is out of index range.
|
||||
GetValue(row int) (Value, error)
|
||||
Value(row int) (Value, error)
|
||||
|
||||
// SetValue overwrites the value
|
||||
SetValue(row int, v Value) error
|
||||
// Set overwrites the value
|
||||
Set(row int, v Value) error
|
||||
|
||||
// FindValue finds the first Value, and returns the row number.
|
||||
// FindFirst finds the first Value, and returns the row number.
|
||||
// It returns -1 and false if the value does not exist.
|
||||
FindValue(v Value) (int, bool)
|
||||
FindFirst(v Value) (int, bool)
|
||||
|
||||
// FindLastValue finds the last Value, and returns the row number.
|
||||
// FindLast finds the last Value, and returns the row number.
|
||||
// It returns -1 and false if the value does not exist.
|
||||
FindLastValue(v Value) (int, bool)
|
||||
FindLast(v Value) (int, bool)
|
||||
|
||||
// Front returns the first row Value.
|
||||
Front() (Value, bool)
|
||||
|
|
@ -50,14 +50,14 @@ type Column interface {
|
|||
// PushBack appends the Value to the Column.
|
||||
PushBack(v Value) int
|
||||
|
||||
// DeleteRow deletes a row by index.
|
||||
DeleteRow(row int) (Value, error)
|
||||
// Delete deletes a row by index.
|
||||
Delete(row int) (Value, error)
|
||||
|
||||
// DeleteRows deletes rows by index [start, end).
|
||||
DeleteRows(start, end int) error
|
||||
// Deletes deletes rows by index [start, end).
|
||||
Deletes(start, end int) error
|
||||
|
||||
// KeepRows keeps the rows by index [start, end).
|
||||
KeepRows(start, end int) error
|
||||
// Keep keeps the rows by index [start, end).
|
||||
Keep(start, end int) error
|
||||
|
||||
// PopFront deletes the value at front.
|
||||
PopFront() (Value, bool)
|
||||
|
|
@ -94,6 +94,7 @@ type column struct {
|
|||
data []Value
|
||||
}
|
||||
|
||||
// NewColumn creates a new Column.
|
||||
func NewColumn(hd string) Column {
|
||||
return &column{
|
||||
header: hd,
|
||||
|
|
@ -102,14 +103,14 @@ func NewColumn(hd string) Column {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *column) RowNumber() int {
|
||||
func (c *column) CountRow() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
return c.size
|
||||
}
|
||||
|
||||
func (c *column) GetHeader() string {
|
||||
func (c *column) Header() string {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
@ -123,7 +124,7 @@ func (c *column) UpdateHeader(header string) {
|
|||
c.header = header
|
||||
}
|
||||
|
||||
func (c *column) GetValue(row int) (Value, error) {
|
||||
func (c *column) Value(row int) (Value, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
@ -133,7 +134,7 @@ func (c *column) GetValue(row int) (Value, error) {
|
|||
return c.data[row], nil
|
||||
}
|
||||
|
||||
func (c *column) SetValue(row int, v Value) error {
|
||||
func (c *column) Set(row int, v Value) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
@ -144,7 +145,7 @@ func (c *column) SetValue(row int, v Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *column) FindValue(v Value) (int, bool) {
|
||||
func (c *column) FindFirst(v Value) (int, bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
@ -156,7 +157,7 @@ func (c *column) FindValue(v Value) (int, bool) {
|
|||
return -1, false
|
||||
}
|
||||
|
||||
func (c *column) FindLastValue(v Value) (int, bool) {
|
||||
func (c *column) FindLast(v Value) (int, bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
@ -246,7 +247,7 @@ func (c *column) PushBack(v Value) int {
|
|||
return c.size
|
||||
}
|
||||
|
||||
func (c *column) DeleteRow(row int) (Value, error) {
|
||||
func (c *column) Delete(row int) (Value, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
@ -260,7 +261,7 @@ func (c *column) DeleteRow(row int) (Value, error) {
|
|||
return v, nil
|
||||
}
|
||||
|
||||
func (c *column) DeleteRows(start, end int) error {
|
||||
func (c *column) Deletes(start, end int) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
@ -290,7 +291,7 @@ func (c *column) DeleteRows(start, end int) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *column) KeepRows(start, end int) error {
|
||||
func (c *column) Keep(start, end int) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
|
|
|||
|
|
@ -9,32 +9,35 @@ import (
|
|||
|
||||
// Frame contains data.
|
||||
type Frame interface {
|
||||
// GetHeader returns the slice of headers in order. Header name is unique among its Frame.
|
||||
GetHeader() []string
|
||||
// Headers returns the slice of headers in order. Header name is unique among its Frame.
|
||||
Headers() []string
|
||||
|
||||
// AddColumn adds a Column to Frame.
|
||||
AddColumn(c Column) error
|
||||
|
||||
// GetColumn returns the Column by its header name.
|
||||
GetColumn(header string) (Column, error)
|
||||
// Column returns the Column by its header name.
|
||||
Column(header string) (Column, error)
|
||||
|
||||
// GetColumns returns all Columns.
|
||||
GetColumns() []Column
|
||||
// Columns returns all Columns.
|
||||
Columns() []Column
|
||||
|
||||
// GetColumnNumber returns the number of Columns in the Frame.
|
||||
GetColumnNumber() int
|
||||
// CountColumn returns the number of Columns in the Frame.
|
||||
CountColumn() int
|
||||
|
||||
// UpdateHeader updates the header name of a Column.
|
||||
UpdateHeader(origHeader, newHeader string) error
|
||||
|
||||
// MoveColumn moves the column right before the target index.
|
||||
MoveColumn(header string, target int) error
|
||||
|
||||
// DeleteColumn deletes the Column by its header.
|
||||
DeleteColumn(header string) bool
|
||||
|
||||
// ToCSV saves the Frame to a CSV file.
|
||||
ToCSV(fpath string) error
|
||||
// CSV saves the Frame to a CSV file.
|
||||
CSV(fpath string) error
|
||||
|
||||
// ToRows returns the header and data slices.
|
||||
ToRows() ([]string, [][]string)
|
||||
// Rows returns the header and data slices.
|
||||
Rows() ([]string, [][]string)
|
||||
|
||||
// Sort sorts the Frame.
|
||||
Sort(header string, st SortType, so SortOption) error
|
||||
|
|
@ -46,6 +49,7 @@ type frame struct {
|
|||
headerTo map[string]int
|
||||
}
|
||||
|
||||
// New returns a new Frame.
|
||||
func New() Frame {
|
||||
return &frame{
|
||||
columns: []Column{},
|
||||
|
|
@ -53,6 +57,7 @@ func New() Frame {
|
|||
}
|
||||
}
|
||||
|
||||
// NewFromRows creates Frame from rows.
|
||||
func NewFromRows(header []string, rows [][]string) (Frame, error) {
|
||||
if len(rows) < 1 {
|
||||
return nil, fmt.Errorf("empty row %q", rows)
|
||||
|
|
@ -119,6 +124,7 @@ func NewFromRows(header []string, rows [][]string) (Frame, error) {
|
|||
return fr, nil
|
||||
}
|
||||
|
||||
// NewFromCSV creates a new Frame from CSV.
|
||||
func NewFromCSV(header []string, fpath string) (Frame, error) {
|
||||
f, err := os.OpenFile(fpath, os.O_RDONLY, 0444)
|
||||
if err != nil {
|
||||
|
|
@ -139,7 +145,7 @@ func NewFromCSV(header []string, fpath string) (Frame, error) {
|
|||
return NewFromRows(header, rows)
|
||||
}
|
||||
|
||||
func (f *frame) GetHeader() []string {
|
||||
func (f *frame) Headers() []string {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
|
|
@ -154,7 +160,7 @@ func (f *frame) AddColumn(c Column) error {
|
|||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
header := c.GetHeader()
|
||||
header := c.Header()
|
||||
if _, ok := f.headerTo[header]; ok {
|
||||
return fmt.Errorf("%q already exists", header)
|
||||
}
|
||||
|
|
@ -163,7 +169,7 @@ func (f *frame) AddColumn(c Column) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (f *frame) GetColumn(header string) (Column, error) {
|
||||
func (f *frame) Column(header string) (Column, error) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
|
|
@ -174,14 +180,14 @@ func (f *frame) GetColumn(header string) (Column, error) {
|
|||
return f.columns[idx], nil
|
||||
}
|
||||
|
||||
func (f *frame) GetColumns() []Column {
|
||||
func (f *frame) Columns() []Column {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
return f.columns
|
||||
}
|
||||
|
||||
func (f *frame) GetColumnNumber() int {
|
||||
func (f *frame) CountColumn() int {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
|
|
@ -205,6 +211,77 @@ func (f *frame) UpdateHeader(origHeader, newHeader string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (f *frame) MoveColumn(header string, target int) error {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
if target < 0 || target > len(f.headerTo) {
|
||||
return fmt.Errorf("%d is out of range", target)
|
||||
}
|
||||
|
||||
oldi, ok := f.headerTo[header]
|
||||
if !ok {
|
||||
return fmt.Errorf("%q does not exist", header)
|
||||
}
|
||||
if target == oldi {
|
||||
// no need to insert
|
||||
return nil
|
||||
}
|
||||
|
||||
var copied []Column
|
||||
switch {
|
||||
case target < oldi: // move somewhere to left
|
||||
// e.g. arr1, oldi 7, target 2
|
||||
// 0 1 | 2 3 4 5 6 [7] 8 9
|
||||
// 1. copy[:2]
|
||||
// 2. arr2[2] = arr1[7]
|
||||
// 3. copy[3:7]
|
||||
// 4. copy[8:]
|
||||
copied = make([]Column, target)
|
||||
if target == 0 {
|
||||
copied = []Column{}
|
||||
} else {
|
||||
copy(copied, f.columns[:target])
|
||||
}
|
||||
copied = append(copied, f.columns[oldi])
|
||||
// at this point, moved until 'target' index
|
||||
for i, c := range f.columns {
|
||||
if i < target || i == oldi { // already moved
|
||||
continue
|
||||
}
|
||||
copied = append(copied, c)
|
||||
}
|
||||
|
||||
case oldi < target: // move somewhere to right
|
||||
// e.g. arr1, oldi 2, target 8
|
||||
// 0 1 [2] 3 4 5 6 7 | 8 9
|
||||
// 1. copy[:2]
|
||||
// 2. copy[3:8]
|
||||
// 3. arr2[7] = arr1[2]
|
||||
// 4. copy[8:]
|
||||
copied = make([]Column, oldi)
|
||||
if oldi == 0 {
|
||||
copied = []Column{}
|
||||
} else {
|
||||
copy(copied, f.columns[:oldi])
|
||||
}
|
||||
copied = append(copied, f.columns[oldi+1:target]...)
|
||||
for i, c := range f.columns {
|
||||
if i != oldi && i < target { // already moved
|
||||
continue
|
||||
}
|
||||
copied = append(copied, c)
|
||||
}
|
||||
}
|
||||
f.columns = copied
|
||||
|
||||
// update column index
|
||||
for i, col := range f.columns {
|
||||
f.headerTo[col.Header()] = i
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *frame) DeleteColumn(header string) bool {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
|
@ -225,12 +302,12 @@ func (f *frame) DeleteColumn(header string) bool {
|
|||
// update headerTo
|
||||
f.headerTo = make(map[string]int)
|
||||
for i, c := range f.columns {
|
||||
f.headerTo[c.GetHeader()] = i
|
||||
f.headerTo[c.Header()] = i
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *frame) ToRows() ([]string, [][]string) {
|
||||
func (f *frame) Rows() ([]string, [][]string) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
|
|
@ -241,7 +318,7 @@ func (f *frame) ToRows() ([]string, [][]string) {
|
|||
|
||||
var rowN int
|
||||
for _, col := range f.columns {
|
||||
n := col.RowNumber()
|
||||
n := col.CountRow()
|
||||
if rowN < n {
|
||||
rowN = n
|
||||
}
|
||||
|
|
@ -252,10 +329,10 @@ func (f *frame) ToRows() ([]string, [][]string) {
|
|||
for rowIdx := 0; rowIdx < rowN; rowIdx++ {
|
||||
row := make([]string, colN)
|
||||
for colIdx, col := range f.columns { // rowIdx * colIdx
|
||||
v, err := col.GetValue(rowIdx)
|
||||
v, err := col.Value(rowIdx)
|
||||
var elem string
|
||||
if err == nil {
|
||||
elem, _ = v.ToString()
|
||||
elem, _ = v.String()
|
||||
}
|
||||
row[colIdx] = elem
|
||||
}
|
||||
|
|
@ -265,7 +342,7 @@ func (f *frame) ToRows() ([]string, [][]string) {
|
|||
return headers, rows
|
||||
}
|
||||
|
||||
func (f *frame) ToCSV(fpath string) error {
|
||||
func (f *frame) CSV(fpath string) error {
|
||||
fi, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC, 0777)
|
||||
if err != nil {
|
||||
fi, err = os.Create(fpath)
|
||||
|
|
@ -277,7 +354,7 @@ func (f *frame) ToCSV(fpath string) error {
|
|||
|
||||
wr := csv.NewWriter(fi)
|
||||
|
||||
headers, rows := f.ToRows()
|
||||
headers, rows := f.Rows()
|
||||
if err := wr.Write(headers); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -330,7 +407,7 @@ func (f *frame) Sort(header string, st SortType, so SortOption) error {
|
|||
}
|
||||
}
|
||||
|
||||
headers, rows := f.ToRows()
|
||||
headers, rows := f.Rows()
|
||||
SortBy(
|
||||
rows,
|
||||
lesses...,
|
||||
|
|
|
|||
|
|
@@ -8,17 +8,17 @@ import (
// Value represents the value in data frame.
type Value interface {
-	// ToString parses Value to string. It returns false if not possible.
-	ToString() (string, bool)
+	// String parses Value to string. It returns false if not possible.
+	String() (string, bool)

-	// ToNumber parses Value to float64. It returns false if not possible.
-	ToNumber() (float64, bool)
+	// Number parses Value to float64. It returns false if not possible.
+	Number() (float64, bool)

-	// ToTime parses Value to time.Time based on the layout. It returns false if not possible.
-	ToTime(layout string) (time.Time, bool)
+	// Time parses Value to time.Time based on the layout. It returns false if not possible.
+	Time(layout string) (time.Time, bool)

-	// ToDuration parses Value to time.Duration. It returns false if not possible.
-	ToDuration() (time.Duration, bool)
+	// Duration parses Value to time.Duration. It returns false if not possible.
+	Duration() (time.Duration, bool)

	// IsNil returns true if the Value is nil.
	IsNil() bool

@@ -51,21 +51,21 @@ func NewStringValueNil() Value {
type String string

-func (s String) ToString() (string, bool) {
+func (s String) String() (string, bool) {
	return string(s), true
}

-func (s String) ToNumber() (float64, bool) {
+func (s String) Number() (float64, bool) {
	f, err := strconv.ParseFloat(string(s), 64)
	return f, err == nil
}

-func (s String) ToTime(layout string) (time.Time, bool) {
+func (s String) Time(layout string) (time.Time, bool) {
	t, err := time.Parse(layout, string(s))
	return t, err == nil
}

-func (s String) ToDuration() (time.Duration, bool) {
+func (s String) Duration() (time.Duration, bool) {
	d, err := time.ParseDuration(string(s))
	return d, err == nil
}

@@ -90,8 +90,8 @@ func (vs ByStringAscending) Swap(i, j int) {
}

func (vs ByStringAscending) Less(i, j int) bool {
-	vs1, _ := vs[i].ToString()
-	vs2, _ := vs[j].ToString()
+	vs1, _ := vs[i].String()
+	vs2, _ := vs[j].String()
	return vs1 < vs2
}

@@ -106,8 +106,8 @@ func (vs ByStringDescending) Swap(i, j int) {
}

func (vs ByStringDescending) Less(i, j int) bool {
-	vs1, _ := vs[i].ToString()
-	vs2, _ := vs[j].ToString()
+	vs1, _ := vs[i].String()
+	vs2, _ := vs[j].String()
	return vs1 > vs2
}

@@ -122,8 +122,8 @@ func (vs ByNumberAscending) Swap(i, j int) {
}

func (vs ByNumberAscending) Less(i, j int) bool {
-	vs1, _ := vs[i].ToNumber()
-	vs2, _ := vs[j].ToNumber()
+	vs1, _ := vs[i].Number()
+	vs2, _ := vs[j].Number()
	return vs1 < vs2
}

@@ -138,8 +138,8 @@ func (vs ByNumberDescending) Swap(i, j int) {
}

func (vs ByNumberDescending) Less(i, j int) bool {
-	vs1, _ := vs[i].ToNumber()
-	vs2, _ := vs[j].ToNumber()
+	vs1, _ := vs[i].Number()
+	vs2, _ := vs[j].Number()
	return vs1 > vs2
}

@@ -154,8 +154,8 @@ func (vs ByDurationAscending) Swap(i, j int) {
}

func (vs ByDurationAscending) Less(i, j int) bool {
-	vs1, _ := vs[i].ToDuration()
-	vs2, _ := vs[j].ToDuration()
+	vs1, _ := vs[i].Duration()
+	vs2, _ := vs[j].Duration()
	return vs1 < vs2
}

@@ -170,7 +170,7 @@ func (vs ByDurationDescending) Swap(i, j int) {
}

func (vs ByDurationDescending) Less(i, j int) bool {
-	vs1, _ := vs[i].ToDuration()
-	vs2, _ := vs[j].ToDuration()
+	vs1, _ := vs[i].Duration()
+	vs2, _ := vs[j].Duration()
	return vs1 > vs2
}
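The interface rename above drops the To- prefix so String(), Number(), Time(), and Duration() follow the usual Go getter naming. A minimal self-contained sketch of the renamed shape, declared locally here rather than imported so the snippet stands on its own (these local names are illustrative):

package main

import (
	"fmt"
	"sort"
	"strconv"
	"time"
)

// value mirrors the renamed dataframe Value interface from the hunk above.
type value interface {
	String() (string, bool)
	Number() (float64, bool)
	Time(layout string) (time.Time, bool)
	Duration() (time.Duration, bool)
	IsNil() bool
}

type str string

func (s str) String() (string, bool) { return string(s), true }
func (s str) Number() (float64, bool) {
	f, err := strconv.ParseFloat(string(s), 64)
	return f, err == nil
}
func (s str) Time(layout string) (time.Time, bool) {
	t, err := time.Parse(layout, string(s))
	return t, err == nil
}
func (s str) Duration() (time.Duration, bool) {
	d, err := time.ParseDuration(string(s))
	return d, err == nil
}
func (s str) IsNil() bool { return false }

func main() {
	vs := []value{str("3.5"), str("1.25"), str("2")}

	// Sort ascending by Number(), the same comparison ByNumberAscending performs.
	sort.Slice(vs, func(i, j int) bool {
		a, _ := vs[i].Number()
		b, _ := vs[j].Number()
		return a < b
	})

	for _, v := range vs {
		s, _ := v.String()
		fmt.Println(s)
	}
}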
@@ -12,19 +12,19 @@ type Proc struct {
	PSEntry PSEntry

-	DSEntry             DSEntry
-	ReadsCompletedDiff  uint64
-	SectorsReadDiff     uint64
-	WritesCompletedDiff uint64
-	SectorsWrittenDiff  uint64
+	DSEntry              DSEntry
+	ReadsCompletedDelta  uint64
+	SectorsReadDelta     uint64
+	WritesCompletedDelta uint64
+	SectorsWrittenDelta  uint64

-	NSEntry              NSEntry
-	ReceiveBytesDiff     string
-	ReceivePacketsDiff   uint64
-	TransmitBytesDiff    string
-	TransmitPacketsDiff  uint64
-	ReceiveBytesNumDiff  uint64
-	TransmitBytesNumDiff uint64
+	NSEntry               NSEntry
+	ReceiveBytesDelta     string
+	ReceivePacketsDelta   uint64
+	TransmitBytesDelta    string
+	TransmitPacketsDelta  uint64
+	ReceiveBytesNumDelta  uint64
+	TransmitBytesNumDelta uint64

	// Extra exists to support customized data query.
	Extra []byte

@@ -146,17 +146,17 @@ func init() {
	ProcHeader = append(ProcHeader, columnsDSEntry...)
	ProcHeader = append(ProcHeader, columnsNSEntry...)
	ProcHeader = append(ProcHeader,
-		"READS-COMPLETED-DIFF",
-		"SECTORS-READ-DIFF",
-		"WRITES-COMPLETED-DIFF",
-		"SECTORS-WRITTEN-DIFF",
+		"READS-COMPLETED-DELTA",
+		"SECTORS-READ-DELTA",
+		"WRITES-COMPLETED-DELTA",
+		"SECTORS-WRITTEN-DELTA",

-		"RECEIVE-BYTES-DIFF",
-		"RECEIVE-PACKETS-DIFF",
-		"TRANSMIT-BYTES-DIFF",
-		"TRANSMIT-PACKETS-DIFF",
-		"RECEIVE-BYTES-NUM-DIFF",
-		"TRANSMIT-BYTES-NUM-DIFF",
+		"RECEIVE-BYTES-DELTA",
+		"RECEIVE-PACKETS-DELTA",
+		"TRANSMIT-BYTES-DELTA",
+		"TRANSMIT-PACKETS-DELTA",
+		"RECEIVE-BYTES-NUM-DELTA",
+		"TRANSMIT-BYTES-NUM-DELTA",

		"EXTRA",
	)

@@ -202,17 +202,17 @@ func (p *Proc) ToRow() (row []string) {
	row[27] = fmt.Sprintf("%d", p.NSEntry.ReceiveBytesNum)  // RECEIVE-BYTES-NUM
	row[28] = fmt.Sprintf("%d", p.NSEntry.TransmitBytesNum) // TRANSMIT-BYTES-NUM

-	row[29] = fmt.Sprintf("%d", p.ReadsCompletedDiff)  // READS-COMPLETED-DIFF
-	row[30] = fmt.Sprintf("%d", p.SectorsReadDiff)     // SECTORS-READ-DIFF
-	row[31] = fmt.Sprintf("%d", p.WritesCompletedDiff) // WRITES-COMPLETED-DIFF
-	row[32] = fmt.Sprintf("%d", p.SectorsWrittenDiff)  // SECTORS-WRITTEN-DIFF
+	row[29] = fmt.Sprintf("%d", p.ReadsCompletedDelta)  // READS-COMPLETED-DELTA
+	row[30] = fmt.Sprintf("%d", p.SectorsReadDelta)     // SECTORS-READ-DELTA
+	row[31] = fmt.Sprintf("%d", p.WritesCompletedDelta) // WRITES-COMPLETED-DELTA
+	row[32] = fmt.Sprintf("%d", p.SectorsWrittenDelta)  // SECTORS-WRITTEN-DELTA

-	row[33] = p.ReceiveBytesDiff                        // RECEIVE-BYTES-DIFF
-	row[34] = fmt.Sprintf("%d", p.ReceivePacketsDiff)   // RECEIVE-PACKETS-DIFF
-	row[35] = p.TransmitBytesDiff                       // TRANSMIT-BYTES-DIFF
-	row[36] = fmt.Sprintf("%d", p.TransmitPacketsDiff)  // TRANSMIT-PACKETS-DIFF
-	row[37] = fmt.Sprintf("%d", p.ReceiveBytesNumDiff)  // RECEIVE-BYTES-NUM-DIFF
-	row[38] = fmt.Sprintf("%d", p.TransmitBytesNumDiff) // TRANSMIT-BYTES-NUM-DIFF
+	row[33] = p.ReceiveBytesDelta                        // RECEIVE-BYTES-DELTA
+	row[34] = fmt.Sprintf("%d", p.ReceivePacketsDelta)   // RECEIVE-PACKETS-DELTA
+	row[35] = p.TransmitBytesDelta                       // TRANSMIT-BYTES-DELTA
+	row[36] = fmt.Sprintf("%d", p.TransmitPacketsDelta)  // TRANSMIT-PACKETS-DELTA
+	row[37] = fmt.Sprintf("%d", p.ReceiveBytesNumDelta)  // RECEIVE-BYTES-NUM-DELTA
+	row[38] = fmt.Sprintf("%d", p.TransmitBytesNumDelta) // TRANSMIT-BYTES-NUM-DELTA

	row[39] = string(p.Extra) // EXTRA
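The renamed *Delta fields hold the change in each cumulative counter between two consecutive samples, which ToRow then serializes as decimal strings under the matching *-DELTA header. A small self-contained sketch of that computation; the struct below is a stand-in for illustration, not the real psn.Proc:

package main

import "fmt"

// dsSample is a stand-in for the cumulative disk counters carried in DSEntry.
type dsSample struct {
	ReadsCompleted  uint64
	SectorsRead     uint64
	WritesCompleted uint64
	SectorsWritten  uint64
}

// deltas returns the per-interval changes, i.e. the quantities the renamed
// ReadsCompletedDelta / SectorsReadDelta / ... fields carry.
func deltas(prev, cur dsSample) dsSample {
	return dsSample{
		ReadsCompleted:  cur.ReadsCompleted - prev.ReadsCompleted,
		SectorsRead:     cur.SectorsRead - prev.SectorsRead,
		WritesCompleted: cur.WritesCompleted - prev.WritesCompleted,
		SectorsWritten:  cur.SectorsWritten - prev.SectorsWritten,
	}
}

func main() {
	prev := dsSample{ReadsCompleted: 1000, SectorsRead: 8000, WritesCompleted: 500, SectorsWritten: 4000}
	cur := dsSample{ReadsCompleted: 1012, SectorsRead: 8096, WritesCompleted: 503, SectorsWritten: 4024}

	d := deltas(prev, cur)

	// Serialize the way ToRow does: every cell becomes a decimal string.
	row := []string{
		fmt.Sprintf("%d", d.ReadsCompleted),  // READS-COMPLETED-DELTA
		fmt.Sprintf("%d", d.SectorsRead),     // SECTORS-READ-DELTA
		fmt.Sprintf("%d", d.WritesCompleted), // WRITES-COMPLETED-DELTA
		fmt.Sprintf("%d", d.SectorsWritten),  // SECTORS-WRITTEN-DELTA
	}
	fmt.Println(row)
}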
@@ -79,26 +79,26 @@ func (c *CSV) Add() error {
	c.MaxUnixTS = cur.UnixTS

	if cur.UnixTS-prev.UnixTS == 1 {
-		cur.ReadsCompletedDiff = cur.DSEntry.ReadsCompleted - prev.DSEntry.ReadsCompleted
-		cur.SectorsReadDiff = cur.DSEntry.SectorsRead - prev.DSEntry.SectorsRead
-		cur.WritesCompletedDiff = cur.DSEntry.WritesCompleted - prev.DSEntry.WritesCompleted
-		cur.SectorsWrittenDiff = cur.DSEntry.SectorsWritten - prev.DSEntry.SectorsWritten
+		cur.ReadsCompletedDelta = cur.DSEntry.ReadsCompleted - prev.DSEntry.ReadsCompleted
+		cur.SectorsReadDelta = cur.DSEntry.SectorsRead - prev.DSEntry.SectorsRead
+		cur.WritesCompletedDelta = cur.DSEntry.WritesCompleted - prev.DSEntry.WritesCompleted
+		cur.SectorsWrittenDelta = cur.DSEntry.SectorsWritten - prev.DSEntry.SectorsWritten

-		cur.ReceiveBytesNumDiff = cur.NSEntry.ReceiveBytesNum - prev.NSEntry.ReceiveBytesNum
-		cur.TransmitBytesNumDiff = cur.NSEntry.TransmitBytesNum - prev.NSEntry.TransmitBytesNum
-		cur.ReceivePacketsDiff = cur.NSEntry.ReceivePackets - prev.NSEntry.ReceivePackets
-		cur.TransmitPacketsDiff = cur.NSEntry.TransmitPackets - prev.NSEntry.TransmitPackets
+		cur.ReceiveBytesNumDelta = cur.NSEntry.ReceiveBytesNum - prev.NSEntry.ReceiveBytesNum
+		cur.TransmitBytesNumDelta = cur.NSEntry.TransmitBytesNum - prev.NSEntry.TransmitBytesNum
+		cur.ReceivePacketsDelta = cur.NSEntry.ReceivePackets - prev.NSEntry.ReceivePackets
+		cur.TransmitPacketsDelta = cur.NSEntry.TransmitPackets - prev.NSEntry.TransmitPackets

-		cur.ReceiveBytesDiff = humanize.Bytes(cur.ReceiveBytesNumDiff)
-		cur.TransmitBytesDiff = humanize.Bytes(cur.TransmitBytesNumDiff)
+		cur.ReceiveBytesDelta = humanize.Bytes(cur.ReceiveBytesNumDelta)
+		cur.TransmitBytesDelta = humanize.Bytes(cur.TransmitBytesNumDelta)

		c.Rows = append(c.Rows, cur)
		return nil
	}

	// there are empty rows between; estimate and fill-in
-	tsDiff := cur.UnixTS - prev.UnixTS
-	nexts := make([]Proc, 0, tsDiff+1)
+	tsDelta := cur.UnixTS - prev.UnixTS
+	nexts := make([]Proc, 0, tsDelta+1)

	// estimate the previous ones based on 'prev' and 'cur'
	mid := prev

@@ -117,59 +117,59 @@ func (c *CSV) Add() error {
	mid.PSEntry.VMSize = humanize.Bytes(mid.PSEntry.VMSizeNum)

	// DSEntry; calculate delta assuming that metrics are cumulative
-	mid.ReadsCompletedDiff = (cur.DSEntry.ReadsCompleted - prev.DSEntry.ReadsCompleted) / uint64(tsDiff)
-	mid.SectorsReadDiff = (cur.DSEntry.SectorsRead - prev.DSEntry.SectorsRead) / uint64(tsDiff)
-	mid.WritesCompletedDiff = (cur.DSEntry.WritesCompleted - prev.DSEntry.WritesCompleted) / uint64(tsDiff)
-	mid.SectorsWrittenDiff = (cur.DSEntry.SectorsWritten - prev.DSEntry.SectorsWritten) / uint64(tsDiff)
-	timeSpentOnReadingMsDelta := (cur.DSEntry.TimeSpentOnReadingMs - prev.DSEntry.TimeSpentOnReadingMs) / uint64(tsDiff)
-	timeSpentOnWritingMsDelta := (cur.DSEntry.TimeSpentOnWritingMs - prev.DSEntry.TimeSpentOnWritingMs) / uint64(tsDiff)
+	mid.ReadsCompletedDelta = (cur.DSEntry.ReadsCompleted - prev.DSEntry.ReadsCompleted) / uint64(tsDelta)
+	mid.SectorsReadDelta = (cur.DSEntry.SectorsRead - prev.DSEntry.SectorsRead) / uint64(tsDelta)
+	mid.WritesCompletedDelta = (cur.DSEntry.WritesCompleted - prev.DSEntry.WritesCompleted) / uint64(tsDelta)
+	mid.SectorsWrittenDelta = (cur.DSEntry.SectorsWritten - prev.DSEntry.SectorsWritten) / uint64(tsDelta)
+	timeSpentOnReadingMsDelta := (cur.DSEntry.TimeSpentOnReadingMs - prev.DSEntry.TimeSpentOnReadingMs) / uint64(tsDelta)
+	timeSpentOnWritingMsDelta := (cur.DSEntry.TimeSpentOnWritingMs - prev.DSEntry.TimeSpentOnWritingMs) / uint64(tsDelta)

	// NSEntry; calculate delta assuming that metrics are cumulative
-	mid.ReceiveBytesNumDiff = (cur.NSEntry.ReceiveBytesNum - prev.NSEntry.ReceiveBytesNum) / uint64(tsDiff)
-	mid.ReceiveBytesDiff = humanize.Bytes(mid.ReceiveBytesNumDiff)
-	mid.ReceivePacketsDiff = (cur.NSEntry.ReceivePackets - prev.NSEntry.ReceivePackets) / uint64(tsDiff)
-	mid.TransmitBytesNumDiff = (cur.NSEntry.TransmitBytesNum - prev.NSEntry.TransmitBytesNum) / uint64(tsDiff)
-	mid.TransmitBytesDiff = humanize.Bytes(mid.TransmitBytesNumDiff)
-	mid.TransmitPacketsDiff = (cur.NSEntry.TransmitPackets - prev.NSEntry.TransmitPackets) / uint64(tsDiff)
+	mid.ReceiveBytesNumDelta = (cur.NSEntry.ReceiveBytesNum - prev.NSEntry.ReceiveBytesNum) / uint64(tsDelta)
+	mid.ReceiveBytesDelta = humanize.Bytes(mid.ReceiveBytesNumDelta)
+	mid.ReceivePacketsDelta = (cur.NSEntry.ReceivePackets - prev.NSEntry.ReceivePackets) / uint64(tsDelta)
+	mid.TransmitBytesNumDelta = (cur.NSEntry.TransmitBytesNum - prev.NSEntry.TransmitBytesNum) / uint64(tsDelta)
+	mid.TransmitBytesDelta = humanize.Bytes(mid.TransmitBytesNumDelta)
+	mid.TransmitPacketsDelta = (cur.NSEntry.TransmitPackets - prev.NSEntry.TransmitPackets) / uint64(tsDelta)

-	for i := int64(1); i < tsDiff; i++ {
+	for i := int64(1); i < tsDelta; i++ {
		ev := mid
		ev.UnixTS = prev.UnixTS + i

-		ev.DSEntry.ReadsCompleted += mid.ReadsCompletedDiff * uint64(i)
-		ev.DSEntry.SectorsRead += mid.SectorsReadDiff * uint64(i)
-		ev.DSEntry.WritesCompleted += mid.WritesCompletedDiff * uint64(i)
-		ev.DSEntry.SectorsWritten += mid.SectorsWrittenDiff * uint64(i)
+		ev.DSEntry.ReadsCompleted += mid.ReadsCompletedDelta * uint64(i)
+		ev.DSEntry.SectorsRead += mid.SectorsReadDelta * uint64(i)
+		ev.DSEntry.WritesCompleted += mid.WritesCompletedDelta * uint64(i)
+		ev.DSEntry.SectorsWritten += mid.SectorsWrittenDelta * uint64(i)
		ev.DSEntry.TimeSpentOnReadingMs += timeSpentOnReadingMsDelta * uint64(i)
		ev.DSEntry.TimeSpentOnWritingMs += timeSpentOnWritingMsDelta * uint64(i)
		ev.DSEntry.TimeSpentOnReading = humanizeDurationMs(ev.DSEntry.TimeSpentOnReadingMs)
		ev.DSEntry.TimeSpentOnWriting = humanizeDurationMs(ev.DSEntry.TimeSpentOnWritingMs)

-		ev.NSEntry.ReceiveBytesNum += mid.ReceiveBytesNumDiff * uint64(i)
+		ev.NSEntry.ReceiveBytesNum += mid.ReceiveBytesNumDelta * uint64(i)
		ev.NSEntry.ReceiveBytes = humanize.Bytes(ev.NSEntry.ReceiveBytesNum)
-		ev.NSEntry.ReceivePackets += mid.ReceivePacketsDiff * uint64(i)
-		ev.NSEntry.TransmitBytesNum += mid.TransmitBytesNumDiff * uint64(i)
+		ev.NSEntry.ReceivePackets += mid.ReceivePacketsDelta * uint64(i)
+		ev.NSEntry.TransmitBytesNum += mid.TransmitBytesNumDelta * uint64(i)
		ev.NSEntry.TransmitBytes = humanize.Bytes(ev.NSEntry.TransmitBytesNum)
-		ev.NSEntry.TransmitPackets += mid.TransmitPacketsDiff * uint64(i)
+		ev.NSEntry.TransmitPackets += mid.TransmitPacketsDelta * uint64(i)

		nexts = append(nexts, ev)
	}

-	// now previous entry is estimated; update 'cur' diff metrics
+	// now previous entry is estimated; update 'cur' Delta metrics
	realPrev := nexts[len(nexts)-1]

-	cur.ReadsCompletedDiff = cur.DSEntry.ReadsCompleted - realPrev.DSEntry.ReadsCompleted
-	cur.SectorsReadDiff = cur.DSEntry.SectorsRead - realPrev.DSEntry.SectorsRead
-	cur.WritesCompletedDiff = cur.DSEntry.WritesCompleted - realPrev.DSEntry.WritesCompleted
-	cur.SectorsWrittenDiff = cur.DSEntry.SectorsWritten - realPrev.DSEntry.SectorsWritten
+	cur.ReadsCompletedDelta = cur.DSEntry.ReadsCompleted - realPrev.DSEntry.ReadsCompleted
+	cur.SectorsReadDelta = cur.DSEntry.SectorsRead - realPrev.DSEntry.SectorsRead
+	cur.WritesCompletedDelta = cur.DSEntry.WritesCompleted - realPrev.DSEntry.WritesCompleted
+	cur.SectorsWrittenDelta = cur.DSEntry.SectorsWritten - realPrev.DSEntry.SectorsWritten

-	cur.ReceiveBytesNumDiff = cur.NSEntry.ReceiveBytesNum - realPrev.NSEntry.ReceiveBytesNum
-	cur.TransmitBytesNumDiff = cur.NSEntry.TransmitBytesNum - realPrev.NSEntry.TransmitBytesNum
-	cur.ReceivePacketsDiff = cur.NSEntry.ReceivePackets - realPrev.NSEntry.ReceivePackets
-	cur.TransmitPacketsDiff = cur.NSEntry.TransmitPackets - realPrev.NSEntry.TransmitPackets
+	cur.ReceiveBytesNumDelta = cur.NSEntry.ReceiveBytesNum - realPrev.NSEntry.ReceiveBytesNum
+	cur.TransmitBytesNumDelta = cur.NSEntry.TransmitBytesNum - realPrev.NSEntry.TransmitBytesNum
+	cur.ReceivePacketsDelta = cur.NSEntry.ReceivePackets - realPrev.NSEntry.ReceivePackets
+	cur.TransmitPacketsDelta = cur.NSEntry.TransmitPackets - realPrev.NSEntry.TransmitPackets

-	cur.ReceiveBytesDiff = humanize.Bytes(cur.ReceiveBytesNumDiff)
-	cur.TransmitBytesDiff = humanize.Bytes(cur.TransmitBytesNumDiff)
+	cur.ReceiveBytesDelta = humanize.Bytes(cur.ReceiveBytesNumDelta)
+	cur.TransmitBytesDelta = humanize.Bytes(cur.TransmitBytesNumDelta)

	c.Rows = append(c.Rows, append(nexts, cur)...)
	return nil
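The gap-filling branch above spreads a cumulative counter evenly across missing seconds: the per-second delta is (cur - prev) / tsDelta, and each synthesized row advances the counter by one more delta. A small self-contained sketch of that idea on a single counter; the type and field names below are illustrative, not the psn types:

package main

import "fmt"

// sample is a cumulative counter observed at a Unix timestamp.
type sample struct {
	unixTS int64
	reads  uint64 // cumulative reads completed
}

// fillGap synthesizes one row per missing second between prev and cur,
// assuming the counter grew linearly, and returns them with the per-second delta.
func fillGap(prev, cur sample) (rows []sample, perSecDelta uint64) {
	tsDelta := cur.unixTS - prev.unixTS
	if tsDelta <= 1 {
		return nil, cur.reads - prev.reads
	}
	perSecDelta = (cur.reads - prev.reads) / uint64(tsDelta)
	for i := int64(1); i < tsDelta; i++ {
		rows = append(rows, sample{
			unixTS: prev.unixTS + i,
			reads:  prev.reads + perSecDelta*uint64(i),
		})
	}
	return rows, perSecDelta
}

func main() {
	prev := sample{unixTS: 100, reads: 1000}
	cur := sample{unixTS: 104, reads: 1400} // three seconds missing in between

	rows, d := fillGap(prev, cur)
	fmt.Println("per-second delta:", d) // 100
	for _, r := range rows {
		fmt.Printf("estimated ts=%d reads=%d\n", r.unixTS, r.reads)
	}
}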
@@ -210,7 +210,7 @@ func ReadCSV(fpath string) (*CSV, error) {
	rd := csv.NewReader(f)

-	// in case that rows have different number of fields
+	// in case that rows have Deltaerent number of fields
	rd.FieldsPerRecord = -1

	rows, err := rd.ReadAll()

@@ -306,19 +306,19 @@ func ReadCSV(fpath string) (*CSV, error) {
		return nil, err
	}

-	readsCompletedDiff, err := strconv.ParseUint(row[ProcHeaderIndex["READS-COMPLETED-DIFF"]], 10, 64)
+	readsCompletedDelta, err := strconv.ParseUint(row[ProcHeaderIndex["READS-COMPLETED-DELTA"]], 10, 64)
	if err != nil {
		return nil, err
	}
-	sectorsReadDiff, err := strconv.ParseUint(row[ProcHeaderIndex["SECTORS-READ-DIFF"]], 10, 64)
+	sectorsReadDelta, err := strconv.ParseUint(row[ProcHeaderIndex["SECTORS-READ-DELTA"]], 10, 64)
	if err != nil {
		return nil, err
	}
-	writesCompletedDiff, err := strconv.ParseUint(row[ProcHeaderIndex["WRITES-COMPLETED-DIFF"]], 10, 64)
+	writesCompletedDelta, err := strconv.ParseUint(row[ProcHeaderIndex["WRITES-COMPLETED-DELTA"]], 10, 64)
	if err != nil {
		return nil, err
	}
-	sectorsWrittenDiff, err := strconv.ParseUint(row[ProcHeaderIndex["SECTORS-WRITTEN-DIFF"]], 10, 64)
+	sectorsWrittenDelta, err := strconv.ParseUint(row[ProcHeaderIndex["SECTORS-WRITTEN-DELTA"]], 10, 64)
	if err != nil {
		return nil, err
	}

@@ -340,19 +340,19 @@ func ReadCSV(fpath string) (*CSV, error) {
		return nil, err
	}

-	receivePacketsDiff, err := strconv.ParseUint(row[ProcHeaderIndex["RECEIVE-PACKETS-DIFF"]], 10, 64)
+	receivePacketsDelta, err := strconv.ParseUint(row[ProcHeaderIndex["RECEIVE-PACKETS-DELTA"]], 10, 64)
	if err != nil {
		return nil, err
	}
-	transmitPacketsDiff, err := strconv.ParseUint(row[ProcHeaderIndex["TRANSMIT-PACKETS-DIFF"]], 10, 64)
+	transmitPacketsDelta, err := strconv.ParseUint(row[ProcHeaderIndex["TRANSMIT-PACKETS-DELTA"]], 10, 64)
	if err != nil {
		return nil, err
	}
-	receiveBytesNumDiff, err := strconv.ParseUint(row[ProcHeaderIndex["RECEIVE-BYTES-NUM-DIFF"]], 10, 64)
+	receiveBytesNumDelta, err := strconv.ParseUint(row[ProcHeaderIndex["RECEIVE-BYTES-NUM-DELTA"]], 10, 64)
	if err != nil {
		return nil, err
	}
-	transmitBytesNumDiff, err := strconv.ParseUint(row[ProcHeaderIndex["TRANSMIT-BYTES-NUM-DIFF"]], 10, 64)
+	transmitBytesNumDelta, err := strconv.ParseUint(row[ProcHeaderIndex["TRANSMIT-BYTES-NUM-DELTA"]], 10, 64)
	if err != nil {
		return nil, err
	}

@@ -385,10 +385,10 @@ func ReadCSV(fpath string) (*CSV, error) {
		TimeSpentOnReadingMs: timeSpentOnReadingMs,
		TimeSpentOnWritingMs: timeSpentOnWritingMs,
	},
-	ReadsCompletedDiff:  readsCompletedDiff,
-	SectorsReadDiff:     sectorsReadDiff,
-	WritesCompletedDiff: writesCompletedDiff,
-	SectorsWrittenDiff:  sectorsWrittenDiff,
+	ReadsCompletedDelta:  readsCompletedDelta,
+	SectorsReadDelta:     sectorsReadDelta,
+	WritesCompletedDelta: writesCompletedDelta,
+	SectorsWrittenDelta:  sectorsWrittenDelta,

	NSEntry: NSEntry{
		Interface: row[ProcHeaderIndex["INTERFACE"]],

@@ -399,12 +399,12 @@ func ReadCSV(fpath string) (*CSV, error) {
		ReceiveBytesNum:  receiveBytesNum,
		TransmitBytesNum: transmitBytesNum,
	},
-	ReceiveBytesDiff:     row[ProcHeaderIndex["RECEIVE-BYTES-DIFF"]],
-	ReceivePacketsDiff:   receivePacketsDiff,
-	TransmitBytesDiff:    row[ProcHeaderIndex["TRANSMIT-BYTES-DIFF"]],
-	TransmitPacketsDiff:  transmitPacketsDiff,
-	ReceiveBytesNumDiff:  receiveBytesNumDiff,
-	TransmitBytesNumDiff: transmitBytesNumDiff,
+	ReceiveBytesDelta:     row[ProcHeaderIndex["RECEIVE-BYTES-DELTA"]],
+	ReceivePacketsDelta:   receivePacketsDelta,
+	TransmitBytesDelta:    row[ProcHeaderIndex["TRANSMIT-BYTES-DELTA"]],
+	TransmitPacketsDelta:  transmitPacketsDelta,
+	ReceiveBytesNumDelta:  receiveBytesNumDelta,
+	TransmitBytesNumDelta: transmitBytesNumDelta,

	Extra: []byte(row[ProcHeaderIndex["EXTRA"]]),
}
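ReadCSV above sets FieldsPerRecord = -1 so encoding/csv accepts rows whose field counts differ, then parses each *-DELTA cell with strconv.ParseUint. A minimal sketch of that read path; the header names come from the hunks above, while the in-memory CSV data is made up for illustration:

package main

import (
	"encoding/csv"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Two data rows with different field counts; FieldsPerRecord = -1 allows this.
	raw := "UNIX-TS,READS-COMPLETED-DELTA,EXTRA\n100,12,etcd\n101,15\n"

	rd := csv.NewReader(strings.NewReader(raw))
	rd.FieldsPerRecord = -1 // do not enforce a fixed number of fields per row

	rows, err := rd.ReadAll()
	if err != nil {
		panic(err)
	}

	// Build the header -> column index map once, like ProcHeaderIndex.
	headerIndex := make(map[string]int)
	for i, h := range rows[0] {
		headerIndex[h] = i
	}

	for _, row := range rows[1:] {
		delta, err := strconv.ParseUint(row[headerIndex["READS-COMPLETED-DELTA"]], 10, 64)
		if err != nil {
			panic(err)
		}
		fmt.Println("ts", row[headerIndex["UNIX-TS"]], "reads-completed-delta", delta)
	}
}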
@@ -1,4 +1,4 @@
-// // +build ignore
+/* // +build ignore */

// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

@@ -165,15 +165,9 @@ type genRunner struct {
//
// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.*
func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
-	// trim out all types which already implement Selfer
-	typ2 := make([]reflect.Type, 0, len(typ))
-	for _, t := range typ {
-		if reflect.PtrTo(t).Implements(selferTyp) || t.Implements(selferTyp) {
-			continue
-		}
-		typ2 = append(typ2, t)
-	}
-	typ = typ2
+	// All types passed to this method do not have a codec.Selfer method implemented directly.
+	// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
+	// Consequently, there's no need to check and trim them if they implement codec.Selfer

	if len(typ) == 0 {
		return
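The removed block filtered out types whose value or pointer type already satisfies the Selfer interface; per the new comments, that responsibility now lives in codecgen's AST pass. For reference, a standalone sketch of the reflection check the deleted loop performed; the interface and type names here are illustrative stand-ins, not the real codec package:

package main

import (
	"fmt"
	"reflect"
)

// selfer is a stand-in for the codec.Selfer-style interface the old code checked for.
type selfer interface {
	CodecEncodeSelf()
}

type withSelfer struct{}

func (*withSelfer) CodecEncodeSelf() {}

type withoutSelfer struct{}

func main() {
	selferTyp := reflect.TypeOf((*selfer)(nil)).Elem()

	for _, t := range []reflect.Type{
		reflect.TypeOf(withSelfer{}),
		reflect.TypeOf(withoutSelfer{}),
	} {
		// Same test the deleted loop used: either T or *T implements the interface.
		implements := reflect.PtrTo(t).Implements(selferTyp) || t.Implements(selferTyp)
		fmt.Printf("%s implements selfer: %v\n", t.Name(), implements)
	}
}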
@@ -347,7 +347,7 @@ func (d *simpleDecDriver) decLen() int {
		}
		return int(ui)
	}
-	d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
+	d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
	return -1
}
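The one-character fix above escapes the literal percent sign: in a fmt-style format string, % always starts a verb, so the old "bd%8" was parsed as a formatting directive instead of plain text. A quick illustration with the standard fmt package:

package main

import "fmt"

func main() {
	bd := byte(13)

	// Unescaped '%' is treated as the start of a verb, so the message comes out mangled.
	fmt.Println(fmt.Sprintf("bd%8 must be in range 0..4. Got: %d", bd%8))

	// Escaped as '%%', it prints a literal percent sign as intended.
	fmt.Println(fmt.Sprintf("bd%%8 must be in range 0..4. Got: %d", bd%8))
}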