Merge pull request #235 from gyuho/interpolate

csv: interpolate by Unix second
This commit is contained in:
Gyu-Ho Lee 2017-02-03 02:02:51 -08:00 committed by GitHub
commit ff4a3c54e1
48 changed files with 4334 additions and 1742 deletions

View File

@ -20,6 +20,7 @@ import (
"path/filepath"
"github.com/coreos/dbtester/agent/agentpb"
"github.com/coreos/dbtester/pkg/netutil"
"github.com/coreos/dbtester/pkg/ntp"
"github.com/coreos/pkg/capnslog"
"github.com/gyuho/psn"
@ -28,9 +29,10 @@ import (
)
type flags struct {
agentLog string
databaseLog string
systemMetricsCSV string
agentLog string
databaseLog string
systemMetricsCSV string
systemMetricsCSVInterpolated string
javaExec string
etcdExec string
@ -57,14 +59,20 @@ func init() {
if err != nil {
plog.Warningf("cannot get disk device mounted at '/' (%v)", err)
}
nt, err := psn.GetDefaultInterface()
nm, err := netutil.GetDefaultInterfaces()
if err != nil {
plog.Warningf("cannot detect default network interface (%v)", err)
}
var nt string
for k := range nm {
nt = k
break
}
Command.PersistentFlags().StringVar(&globalFlags.agentLog, "agent-log", filepath.Join(homeDir(), "agent.log"), "agent log path.")
Command.PersistentFlags().StringVar(&globalFlags.databaseLog, "database-log", filepath.Join(homeDir(), "database.log"), "Database log path.")
Command.PersistentFlags().StringVar(&globalFlags.systemMetricsCSV, "system-metrics-csv", filepath.Join(homeDir(), "system-metrics.csv"), "System metrics log path.")
Command.PersistentFlags().StringVar(&globalFlags.systemMetricsCSV, "system-metrics-csv", filepath.Join(homeDir(), "system-metrics.csv"), "Raw system metrics data path.")
Command.PersistentFlags().StringVar(&globalFlags.systemMetricsCSVInterpolated, "system-metrics-csv-interpolated", filepath.Join(homeDir(), "system-metrics-interpolated.csv"), "Interpolated system metrics data path.")
Command.PersistentFlags().StringVar(&globalFlags.javaExec, "java-exec", "/usr/bin/java", "Java executable binary path (needed for Zookeeper).")
Command.PersistentFlags().StringVar(&globalFlags.etcdExec, "etcd-exec", filepath.Join(os.Getenv("GOPATH"), "bin/etcd"), "etcd executable binary path.")

View File

@ -52,11 +52,24 @@ func startMetrics(fs *flags, t *transporterServer) error {
case <-t.uploadSig:
plog.Infof("upload signal received; saving CSV at %q", t.metricsCSV.FilePath)
if err := t.metricsCSV.Save(); err != nil {
plog.Errorf("psn.CSV.Save error %v", err)
plog.Errorf("psn.CSV.Save(%q) error %v", t.metricsCSV.FilePath, err)
} else {
plog.Infof("CSV saved at %q", t.metricsCSV.FilePath)
}
interpolated, err := t.metricsCSV.Interpolate()
if err != nil {
plog.Fatalf("psn.CSV.Interpolate(%q) failed with %v", t.metricsCSV.FilePath, err)
}
interpolated.FilePath = fs.systemMetricsCSVInterpolated
if err := interpolated.Save(); err != nil {
plog.Errorf("psn.CSV.Save(%q) error %v", interpolated.FilePath, err)
} else {
plog.Infof("CSV saved at %q", interpolated.FilePath)
}
close(t.csvReady)
return

View File

@ -32,40 +32,19 @@ func uploadLog(fs *flags, t *transporterServer) error {
return err
}
srcDatabaseLogPath := fs.databaseLog
dstDatabaseLogPath := filepath.Base(fs.databaseLog)
if !strings.HasPrefix(filepath.Base(fs.databaseLog), t.req.TestName) {
dstDatabaseLogPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(fs.databaseLog))
}
dstDatabaseLogPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstDatabaseLogPath)
plog.Infof("uploading database log [%q -> %q]", srcDatabaseLogPath, dstDatabaseLogPath)
var uerr error
for k := 0; k < 30; k++ {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcDatabaseLogPath, dstDatabaseLogPath); uerr != nil {
plog.Errorf("UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
break
}
}
if uerr != nil {
return uerr
}
if t.req.Database == agentpb.Request_zetcd || t.req.Database == agentpb.Request_cetcd {
dpath := fs.databaseLog + "-" + t.req.Database.String()
srcDatabaseLogPath2 := dpath
dstDatabaseLogPath2 := filepath.Base(dpath)
if !strings.HasPrefix(filepath.Base(dpath), t.req.TestName) {
dstDatabaseLogPath2 = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(dpath))
{
srcDatabaseLogPath := fs.databaseLog
dstDatabaseLogPath := filepath.Base(fs.databaseLog)
if !strings.HasPrefix(filepath.Base(fs.databaseLog), t.req.TestName) {
dstDatabaseLogPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(fs.databaseLog))
}
dstDatabaseLogPath2 = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstDatabaseLogPath2)
plog.Infof("uploading proxy-database log [%q -> %q]", srcDatabaseLogPath2, dstDatabaseLogPath2)
var uerr error
dstDatabaseLogPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstDatabaseLogPath)
plog.Infof("uploading database log [%q -> %q]", srcDatabaseLogPath, dstDatabaseLogPath)
for k := 0; k < 30; k++ {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcDatabaseLogPath2, dstDatabaseLogPath2); uerr != nil {
plog.Errorf("UploadFile error... sleep and retry... (%v)", uerr)
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcDatabaseLogPath, dstDatabaseLogPath); uerr != nil {
plog.Warningf("UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
@ -77,40 +56,91 @@ func uploadLog(fs *flags, t *transporterServer) error {
}
}
srcMonitorResultPath := fs.systemMetricsCSV
dstMonitorResultPath := filepath.Base(fs.systemMetricsCSV)
if !strings.HasPrefix(filepath.Base(fs.systemMetricsCSV), t.req.TestName) {
dstMonitorResultPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(fs.systemMetricsCSV))
}
dstMonitorResultPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstMonitorResultPath)
plog.Infof("uploading monitor results [%q -> %q]", srcMonitorResultPath, dstMonitorResultPath)
for k := 0; k < 30; k++ {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcMonitorResultPath, dstMonitorResultPath); uerr != nil {
plog.Errorf("u.UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
break
{
if t.req.Database == agentpb.Request_zetcd || t.req.Database == agentpb.Request_cetcd {
dpath := fs.databaseLog + "-" + t.req.Database.String()
srcDatabaseLogPath2 := dpath
dstDatabaseLogPath2 := filepath.Base(dpath)
if !strings.HasPrefix(filepath.Base(dpath), t.req.TestName) {
dstDatabaseLogPath2 = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(dpath))
}
dstDatabaseLogPath2 = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstDatabaseLogPath2)
plog.Infof("uploading proxy-database log [%q -> %q]", srcDatabaseLogPath2, dstDatabaseLogPath2)
for k := 0; k < 30; k++ {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcDatabaseLogPath2, dstDatabaseLogPath2); uerr != nil {
plog.Warningf("UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
break
}
}
if uerr != nil {
return uerr
}
}
}
if uerr != nil {
return uerr
{
srcSysMetricsDataPath := fs.systemMetricsCSV
dstSysMetricsDataPath := filepath.Base(fs.systemMetricsCSV)
if !strings.HasPrefix(filepath.Base(fs.systemMetricsCSV), t.req.TestName) {
dstSysMetricsDataPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(fs.systemMetricsCSV))
}
dstSysMetricsDataPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstSysMetricsDataPath)
plog.Infof("uploading system metrics data [%q -> %q]", srcSysMetricsDataPath, dstSysMetricsDataPath)
for k := 0; k < 30; k++ {
if uerr := u.UploadFile(t.req.GoogleCloudStorageBucketName, srcSysMetricsDataPath, dstSysMetricsDataPath); uerr != nil {
plog.Warningf("u.UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
break
}
}
if uerr != nil {
return uerr
}
}
srcAgentLogPath := fs.agentLog
dstAgentLogPath := filepath.Base(fs.agentLog)
if !strings.HasPrefix(filepath.Base(fs.agentLog), t.req.TestName) {
dstAgentLogPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(fs.agentLog))
{
srcSysMetricsInterpolatedDataPath := fs.systemMetricsCSVInterpolated
dstSysMetricsInterpolatedDataPath := filepath.Base(fs.systemMetricsCSVInterpolated)
if !strings.HasPrefix(filepath.Base(fs.systemMetricsCSVInterpolated), t.req.TestName) {
dstSysMetricsInterpolatedDataPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(fs.systemMetricsCSVInterpolated))
}
dstSysMetricsInterpolatedDataPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstSysMetricsInterpolatedDataPath)
plog.Infof("uploading system metrics interpolated data [%q -> %q]", srcSysMetricsInterpolatedDataPath, dstSysMetricsInterpolatedDataPath)
for k := 0; k < 30; k++ {
if uerr := u.UploadFile(t.req.GoogleCloudStorageBucketName, srcSysMetricsInterpolatedDataPath, dstSysMetricsInterpolatedDataPath); uerr != nil {
plog.Warningf("u.UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
break
}
}
if uerr != nil {
return uerr
}
}
dstAgentLogPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstAgentLogPath)
plog.Infof("uploading agent logs [%q -> %q]", srcAgentLogPath, dstAgentLogPath)
for k := 0; k < 30; k++ {
if uerr = u.UploadFile(t.req.GoogleCloudStorageBucketName, srcAgentLogPath, dstAgentLogPath); uerr != nil {
plog.Errorf("UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
break
{
srcAgentLogPath := fs.agentLog
dstAgentLogPath := filepath.Base(fs.agentLog)
if !strings.HasPrefix(filepath.Base(fs.agentLog), t.req.TestName) {
dstAgentLogPath = fmt.Sprintf("%s-%d-%s", t.req.TestName, t.req.ServerIndex+1, filepath.Base(fs.agentLog))
}
dstAgentLogPath = filepath.Join(t.req.GoogleCloudStorageSubDirectory, dstAgentLogPath)
plog.Infof("uploading agent logs [%q -> %q]", srcAgentLogPath, dstAgentLogPath)
for k := 0; k < 30; k++ {
if uerr := u.UploadFile(t.req.GoogleCloudStorageBucketName, srcAgentLogPath, dstAgentLogPath); uerr != nil {
plog.Warningf("UploadFile error... sleep and retry... (%v)", uerr)
time.Sleep(2 * time.Second)
continue
} else {
break
}
}
}

View File

@ -22,16 +22,16 @@ import (
// RawData defines how to aggregate data from each machine.
type RawData struct {
Legend string `yaml:"legend"`
OutputPath string `yaml:"output_path"`
DataSystemMetricsPaths []string `yaml:"data_system_metrics_paths"`
DatasizeSummary string `yaml:"data_size_summary"`
DataBenchmarkLatencyPercentile string `yaml:"data_benchmark_latency_percentile"`
DataBenchmarkLatencySummary string `yaml:"data_benchmark_latency_summary"`
DataBenchmarkThroughput string `yaml:"data_benchmark_throughput"`
DataBenchmarkLatencyByKey string `yaml:"data_benchmark_latency_by_key"`
DataBenchmarkMemoryByKey string `yaml:"data_benchmark_memory_by_key"`
TotalRequests int `yaml:"total_requests"`
Legend string `yaml:"legend"`
OutputPath string `yaml:"output_path"`
DataInterpolatedSystemMetricsPaths []string `yaml:"data_interpolated_system_metrics_paths"`
DatasizeSummary string `yaml:"data_size_summary"`
DataBenchmarkLatencyPercentile string `yaml:"data_benchmark_latency_percentile"`
DataBenchmarkLatencySummary string `yaml:"data_benchmark_latency_summary"`
DataBenchmarkThroughput string `yaml:"data_benchmark_throughput"`
DataBenchmarkLatencyByKey string `yaml:"data_benchmark_latency_by_key"`
DataBenchmarkMemoryByKey string `yaml:"data_benchmark_memory_by_key"`
TotalRequests int `yaml:"total_requests"`
}
// Config defines analyze configuration.

View File

@ -8,10 +8,10 @@ total_requests: 100000
raw_data:
- legend: etcd v3.1 (Go 1.7.4)
output_path: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-1-system-metrics.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-2-system-metrics.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-data-latency-distribution-summary.csv
@ -21,10 +21,10 @@ raw_data:
- legend: Zookeeper r3.4.9 (Java 8)
output_path: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-aggregated.csv
data_system_metrics_paths:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-1-system-metrics.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-2-system-metrics.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-1-system-metrics-interpolated.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-2-system-metrics-interpolated.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/zookeeper-r3.4.9-java8-data-latency-distribution-summary.csv
@ -34,10 +34,10 @@ raw_data:
- legend: Consul v0.7.2 (Go 1.7.4)
output_path: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-1-system-metrics.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-2-system-metrics.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/consul-v0.7.2-go1.7.4-data-latency-distribution-summary.csv
@ -50,8 +50,8 @@ plot_list:
x_axis: Second
y_axis: Latency(millisecond)
output_path_list:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-LATENCY-MS.svg
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-LATENCY-MS.png
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-LATENCY-MS.svg
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-LATENCY-MS.png
- column: AVG-THROUGHPUT
x_axis: Second
@ -92,8 +92,8 @@ plot_list:
x_axis: Second
y_axis: Disk Reads
output_path_list:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-READS-COMPLETED-DELTA.svg
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-READS-COMPLETED-DELTA.png
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-READS-COMPLETED-DELTA.svg
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-READS-COMPLETED-DELTA.png
- column: AVG-SECTORS-READ-DELTA
x_axis: Second
@ -120,8 +120,8 @@ plot_list:
x_axis: Second
y_axis: Network Receive(bytes)
output_path_list:
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-RECEIVE-BYTES-NUM-DELTA.svg
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-RECEIVE-BYTES-NUM-DELTA.png
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-RECEIVE-BYTES-NUM-DELTA.svg
- 2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/AVG-RECEIVE-BYTES-NUM-DELTA.png
- column: AVG-TRANSMIT-BYTES-NUM-DELTA
x_axis: Second

View File

@ -46,8 +46,8 @@ func Test_readConfig(t *testing.T) {
if c.RawData[0].OutputPath != "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-aggregated.csv" {
t.Fatalf("unexpected c.RawData[0].OutputPath %q", c.RawData[0].OutputPath)
}
if c.RawData[0].DataSystemMetricsPaths[0] != "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-1-system-metrics.csv" {
t.Fatalf("unexpected c.RawData[0].DataSystemMetricsPaths[0] %q", c.RawData[0].DataSystemMetricsPaths[0])
if c.RawData[0].DataInterpolatedSystemMetricsPaths[0] != "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-1-system-metrics-interpolated.csv" {
t.Fatalf("unexpected c.RawData[0].DataInterpolatedSystemMetricsPaths[0] %q", c.RawData[0].DataInterpolatedSystemMetricsPaths[0])
}
if c.RawData[0].DatasizeSummary != "2017Q1-01-etcd-zookeeper-consul/01-write-1M-keys/etcd-v3.1-go1.7.4-data-size-summary.csv" {
t.Fatalf("unexpected c.RawData[0].DatasizeSummary %q", c.RawData[0].DatasizeSummary)

View File

@ -21,8 +21,10 @@ import (
"github.com/gyuho/dataframe"
)
// sysMetricsColumnsToRead is already aggregated
// and interpolated by unix second.
var sysMetricsColumnsToRead = []string{
"UNIX-TS",
"UNIX-SECOND",
"VOLUNTARY-CTXT-SWITCHES",
"NON-VOLUNTARY-CTXT-SWITCHES",
"CPU-NUM",
@ -44,10 +46,10 @@ var sysMetricsColumnsToRead = []string{
}
type testData struct {
filePath string
frontUnixTS int64
lastUnixTS int64
frame dataframe.Frame
filePath string
frontUnixSecond int64
lastUnixSecond int64
frame dataframe.Frame
}
// readSystemMetrics extracts only the columns that we need for analyze.
@ -59,7 +61,7 @@ func readSystemMetrics(fpath string) (data testData, err error) {
data.filePath = fpath
data.frame = dataframe.New()
var unixTSColumn dataframe.Column
var unixSecondCol dataframe.Column
for _, name := range sysMetricsColumnsToRead {
var column dataframe.Column
column, err = originalFrame.Column(name)
@ -69,18 +71,13 @@ func readSystemMetrics(fpath string) (data testData, err error) {
if err = data.frame.AddColumn(column); err != nil {
return testData{}, err
}
if name == "UNIX-TS" {
// TODO: UNIX-TS from pkg/report data is time.Time.Unix
// UNIX-TS from psn.CSV data is time.Time.UnixNano
// we need some kind of way to combine those with matching timestamps
//
// this unixTSColumn is unix nanoseconds
unixTSColumn = column
if name == "UNIX-SECOND" {
unixSecondCol = column
}
}
// get first(minimum) unix second
fv, ok := unixTSColumn.FrontNonNil()
fv, ok := unixSecondCol.FrontNonNil()
if !ok {
return testData{}, fmt.Errorf("FrontNonNil %s has empty Unix time %v", fpath, fv)
}
@ -88,13 +85,13 @@ func readSystemMetrics(fpath string) (data testData, err error) {
if !ok {
return testData{}, fmt.Errorf("cannot String %v", fv)
}
data.frontUnixTS, err = strconv.ParseInt(fs, 10, 64)
data.frontUnixSecond, err = strconv.ParseInt(fs, 10, 64)
if err != nil {
return testData{}, err
}
// get last(maximum) unix second
bv, ok := unixTSColumn.BackNonNil()
bv, ok := unixSecondCol.BackNonNil()
if !ok {
return testData{}, fmt.Errorf("BackNonNil %s has empty Unix time %v", fpath, fv)
}
@ -102,7 +99,7 @@ func readSystemMetrics(fpath string) (data testData, err error) {
if !ok {
return testData{}, fmt.Errorf("cannot String %v", bv)
}
data.lastUnixTS, err = strconv.ParseInt(bs, 10, 64)
data.lastUnixSecond, err = strconv.ParseInt(bs, 10, 64)
if err != nil {
return testData{}, err
}

View File

@ -24,11 +24,11 @@ type analyzeData struct {
databaseTag string
legend string
minUnixTS int64
maxUnixTS int64
sys []testData
minUnixSecond int64
maxUnixSecond int64
sys []testData
// aggregated frame within [min,maxUnixTS] from sys
// aggregated frame within [min,maxUnixSecond] from sys
sysAgg dataframe.Frame
benchMetricsFilePath string
benchMetrics testData
@ -50,14 +50,14 @@ func readSystemMetricsAll(fpaths ...string) (data *analyzeData, err error) {
return nil, err
}
if i == 0 {
data.minUnixTS = sm.frontUnixTS
data.maxUnixTS = sm.lastUnixTS
data.minUnixSecond = sm.frontUnixSecond
data.maxUnixSecond = sm.lastUnixSecond
}
if data.minUnixTS < sm.frontUnixTS {
data.minUnixTS = sm.frontUnixTS
if data.minUnixSecond < sm.frontUnixSecond {
data.minUnixSecond = sm.frontUnixSecond
}
if data.maxUnixTS > sm.lastUnixTS {
data.maxUnixTS = sm.lastUnixTS
if data.maxUnixSecond > sm.lastUnixSecond {
data.maxUnixSecond = sm.lastUnixSecond
}
data.sys = append(data.sys, sm)
}
@ -67,17 +67,12 @@ func readSystemMetricsAll(fpaths ...string) (data *analyzeData, err error) {
// aggSystemMetrics aggregates all system metrics from 3+ nodes.
func (data *analyzeData) aggSystemMetrics() error {
// monitor CSVs from multiple servers, and want them to have equal number of rows
// Truncate all rows before data.minUnixTS and after data.maxUnixTS
minTS := fmt.Sprintf("%d", data.minUnixTS)
maxTS := fmt.Sprintf("%d", data.maxUnixTS)
// Truncate all rows before data.minUnixSecond and after data.maxUnixSecond
minTS := fmt.Sprintf("%d", data.minUnixSecond)
maxTS := fmt.Sprintf("%d", data.maxUnixSecond)
data.sysAgg = dataframe.New()
for i := range data.sys {
// TODO: UNIX-TS from pkg/report data is time.Time.Unix
// UNIX-TS from psn.CSV data is time.Time.UnixNano
// we need some kind of way to combine those with matching timestamps
//
// this is unix nanoseconds
uc, err := data.sys[i].frame.Column("UNIX-TS")
uc, err := data.sys[i].frame.Column("UNIX-SECOND")
if err != nil {
return err
}
@ -91,8 +86,8 @@ func (data *analyzeData) aggSystemMetrics() error {
}
for _, header := range data.sys[i].frame.Headers() {
if i > 0 && header == "UNIX-TS" {
// skip for other databases; we want to keep just one UNIX-TS column
if i > 0 && header == "UNIX-SECOND" {
// skip for other databases; we want to keep just one UNIX-SECOND column
continue
}
@ -101,12 +96,12 @@ func (data *analyzeData) aggSystemMetrics() error {
if err != nil {
return err
}
// just keep rows from [min,maxUnixTS]
// just keep rows from [min,maxUnixSecond]
if err = col.Keep(minTSIdx, maxTSIdx+1); err != nil {
return err
}
if header == "UNIX-TS" {
if header == "UNIX-SECOND" {
if err = data.sysAgg.AddColumn(col); err != nil {
return err
}

View File

@ -32,7 +32,7 @@ func (data *analyzeData) importBenchMetrics(fpath string) (err error) {
}
var oldTSCol dataframe.Column
oldTSCol, err = tdf.Column("UNIX-TS")
oldTSCol, err = tdf.Column("UNIX-SECOND")
if err != nil {
return err
}
@ -46,7 +46,7 @@ func (data *analyzeData) importBenchMetrics(fpath string) (err error) {
if !ok {
return fmt.Errorf("cannot Int64 %v", fv1)
}
data.benchMetrics.frontUnixTS = int64(ivv1)
data.benchMetrics.frontUnixSecond = int64(ivv1)
// get last(maximum) unix second
fv2, ok := oldTSCol.BackNonNil()
@ -57,9 +57,9 @@ func (data *analyzeData) importBenchMetrics(fpath string) (err error) {
if !ok {
return fmt.Errorf("cannot Int64 %v", fv2)
}
data.benchMetrics.lastUnixTS = int64(ivv2)
data.benchMetrics.lastUnixSecond = int64(ivv2)
// UNIX-TS, CONTROL-CLIENT-NUM, AVG-LATENCY-MS, AVG-THROUGHPUT
// UNIX-SECOND, CONTROL-CLIENT-NUM, AVG-LATENCY-MS, AVG-THROUGHPUT
var oldControlClientNumCol dataframe.Column
oldControlClientNumCol, err = tdf.Column("CONTROL-CLIENT-NUM")
if err != nil {
@ -81,7 +81,7 @@ func (data *analyzeData) importBenchMetrics(fpath string) (err error) {
latency float64
throughput float64
}
tsToData := make(map[int64]rowData)
sec2Data := make(map[int64]rowData)
for i := 0; i < oldTSCol.Count(); i++ {
tv, err := oldTSCol.Value(i)
if err != nil {
@ -120,54 +120,54 @@ func (data *analyzeData) importBenchMetrics(fpath string) (err error) {
return fmt.Errorf("cannot Float64 %v", hv)
}
if v, ok := tsToData[ts]; !ok {
tsToData[ts] = rowData{clientN: cn, latency: dataLat, throughput: dataThr}
if v, ok := sec2Data[ts]; !ok {
sec2Data[ts] = rowData{clientN: cn, latency: dataLat, throughput: dataThr}
} else {
oldCn := v.clientN
if oldCn != cn {
return fmt.Errorf("different client number with same timestamps! %d != %d", oldCn, cn)
}
tsToData[ts] = rowData{clientN: cn, latency: (v.latency + dataLat) / 2.0, throughput: (v.throughput + dataThr) / 2.0}
sec2Data[ts] = rowData{clientN: cn, latency: (v.latency + dataLat) / 2.0, throughput: (v.throughput + dataThr) / 2.0}
}
}
// UNIX-TS, CONTROL-CLIENT-NUM, AVG-LATENCY-MS, AVG-THROUGHPUT
// UNIX-SECOND, CONTROL-CLIENT-NUM, AVG-LATENCY-MS, AVG-THROUGHPUT
// aggregate duplicate benchmark timestamps with average values
// OR fill in missing timestamps with zero values
//
// expected row number
rowN := data.benchMetrics.lastUnixTS - data.benchMetrics.frontUnixTS + 1
newTSCol := dataframe.NewColumn("UNIX-TS")
expectedRowN := data.benchMetrics.lastUnixSecond - data.benchMetrics.frontUnixSecond + 1
newSecondCol := dataframe.NewColumn("UNIX-SECOND")
newControlClientNumCol := dataframe.NewColumn("CONTROL-CLIENT-NUM")
newAvgLatencyCol := dataframe.NewColumn("AVG-LATENCY-MS")
newAvgThroughputCol := dataframe.NewColumn("AVG-THROUGHPUT")
for i := int64(0); i < rowN; i++ {
ts := data.benchMetrics.frontUnixTS + i
newTSCol.PushBack(dataframe.NewStringValue(fmt.Sprintf("%d", ts)))
for i := int64(0); i < expectedRowN; i++ {
second := data.benchMetrics.frontUnixSecond + i
newSecondCol.PushBack(dataframe.NewStringValue(second))
v, ok := tsToData[ts]
v, ok := sec2Data[second]
if !ok {
prev, pok := tsToData[ts-1]
prev, pok := sec2Data[second-1]
if !pok {
prev, pok = tsToData[ts+1]
prev, pok = sec2Data[second+1]
if !pok {
return fmt.Errorf("benchmark missing a lot of rows around %d", ts)
return fmt.Errorf("benchmark missing a lot of rows around %d", second)
}
}
newControlClientNumCol.PushBack(dataframe.NewStringValue(prev.clientN))
// just add empty values
newAvgLatencyCol.PushBack(dataframe.NewStringValue("0.0"))
newControlClientNumCol.PushBack(dataframe.NewStringValue(prev.clientN))
newAvgLatencyCol.PushBack(dataframe.NewStringValue(0.0))
newAvgThroughputCol.PushBack(dataframe.NewStringValue(0))
} else {
newControlClientNumCol.PushBack(dataframe.NewStringValue(v.clientN))
newAvgLatencyCol.PushBack(dataframe.NewStringValue(v.latency))
newAvgThroughputCol.PushBack(dataframe.NewStringValue(v.throughput))
continue
}
newControlClientNumCol.PushBack(dataframe.NewStringValue(v.clientN))
newAvgLatencyCol.PushBack(dataframe.NewStringValue(v.latency))
newAvgThroughputCol.PushBack(dataframe.NewStringValue(v.throughput))
}
df := dataframe.New()
if err = df.AddColumn(newTSCol); err != nil {
if err = df.AddColumn(newSecondCol); err != nil {
return err
}
if err = df.AddColumn(newControlClientNumCol); err != nil {

View File

@ -24,25 +24,15 @@ import (
// aggregateAll aggregates all system metrics from 3+ nodes.
func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int) error {
// TODO: UNIX-TS from pkg/report data is time.Time.Unix
// UNIX-TS from psn.CSV data is time.Time.UnixNano
// we need some kind of way to combine those with matching timestamps
//
// this is unix "nano-seconds"
colSys, err := data.sysAgg.Column("UNIX-TS")
colSys, err := data.sysAgg.Column("UNIX-SECOND")
if err != nil {
return err
}
colBench, err := data.benchMetrics.frame.Column("UNIX-SECOND")
if err != nil {
return err
}
// TODO: UNIX-TS from pkg/report data is time.Time.Unix
// UNIX-TS from psn.CSV data is time.Time.UnixNano
// we need some kind of way to combine those with matching timestamps
//
// this is unix "seconds"
colBench, err := data.benchMetrics.frame.Column("UNIX-TS")
if err != nil {
return err
}
fv, ok := colBench.FrontNonNil()
if !ok {
return fmt.Errorf("FrontNonNil %s has empty Unix time %v", data.benchMetrics.filePath, fv)
@ -51,6 +41,7 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int)
if !ok {
return fmt.Errorf("%v is not found in system metrics results", fv)
}
bv, ok := colBench.BackNonNil()
if !ok {
return fmt.Errorf("BackNonNil %s has empty Unix time %v", data.benchMetrics.filePath, fv)
@ -59,7 +50,8 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int)
if !ok {
return fmt.Errorf("%v is not found in system metrics results", fv)
}
sysRowN := sysEndIdx - sysStartIdx + 1
expectedSysRowN := sysEndIdx - sysStartIdx + 1
var minBenchEndIdx int
for _, col := range data.benchMetrics.frame.Columns() {
@ -73,22 +65,24 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int)
// this is index, so decrement by 1 to make it as valid index
minBenchEndIdx--
// sysStartIdx 3, sysEndIdx 9, sysRowN 7, minBenchEndIdx 5 (5+1 < 7)
// sysStartIdx 3, sysEndIdx 9, expectedSysRowN 7, minBenchEndIdx 5 (5+1 < 7)
// so benchmark has 6 rows, but system metrics has 7 rows; benchmark is short of rows
// so we should keep system metrics [3, 9)
// THEN sysEndIdx = 3 + 5 = 8 (keep [3, 8+1))
//
// sysStartIdx 3, sysEndIdx 7, sysRowN 5, minBenchEndIdx 5 (5+1 > 5)
// sysStartIdx 3, sysEndIdx 7, expectedSysRowN 5, minBenchEndIdx 5 (5+1 > 5)
// so benchmark has 6 rows, but system metrics has 5 rows; system metrics is short of rows
// so we should keep benchmark [0, 5)
// THEN minBenchEndIdx = 7 - 3 = 4 (keep [0, 4+1))
if minBenchEndIdx+1 < sysRowN {
if minBenchEndIdx+1 < expectedSysRowN {
// benchmark is short of rows
// adjust system-metrics rows to benchmark-metrics
// adjust system metrics rows to benchmark-metrics
// will truncate front of system metrics rows
sysEndIdx = sysStartIdx + minBenchEndIdx
} else {
// system-metrics is short of rows
// adjust benchmark-metrics to system-metrics
// system metrics is short of rows
// adjust benchmark metrics to system-metrics
// will truncate front of benchmark metrics rows
minBenchEndIdx = sysEndIdx - sysStartIdx
}
@ -96,7 +90,7 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int)
data.aggregated = dataframe.New()
// first, add bench metrics data
// UNIX-TS, AVG-LATENCY-MS, AVG-THROUGHPUT
// UNIX-SECOND, AVG-LATENCY-MS, AVG-THROUGHPUT
for _, col := range data.benchMetrics.frame.Columns() {
// ALWAYS KEEP FROM FIRST ROW OF BENCHMARKS
// keeps from [a, b)
@ -108,7 +102,7 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int)
}
}
for _, col := range data.sysAgg.Columns() {
if col.Header() == "UNIX-TS" {
if col.Header() == "UNIX-SECOND" {
continue
}
if err = col.Keep(sysStartIdx, sysEndIdx+1); err != nil {
@ -302,7 +296,7 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int)
}
// add SECOND column
uc, err := data.aggregated.Column("UNIX-TS")
uc, err := data.aggregated.Column("UNIX-SECOND")
if err != nil {
return err
}
@ -331,7 +325,7 @@ func (data *analyzeData) aggregateAll(memoryByKeyPath string, totalRequests int)
}
// currently first columns are ordered as:
// UNIX-TS, SECOND, AVG-CLIENT-NUM, AVG-LATENCY-MS, AVG-THROUGHPUT
// UNIX-SECOND, SECOND, AVG-CLIENT-NUM, AVG-LATENCY-MS, AVG-THROUGHPUT
//
// re-order columns in the following order, to make it more readable
reorder := []string{

View File

@ -49,7 +49,7 @@ func do(configPath string) error {
}
for _, elem := range cfg.RawData {
plog.Printf("reading system metrics data for %s (%q)", makeTag(elem.Legend), elem.Legend)
ad, err := readSystemMetricsAll(elem.DataSystemMetricsPaths...)
ad, err := readSystemMetricsAll(elem.DataInterpolatedSystemMetricsPaths...)
if err != nil {
return err
}
@ -104,7 +104,7 @@ func do(configPath string) error {
// iterate each database's all data
for _, ad := range all.data {
// ad.benchMetrics.frame.Co
// per database
var (
readsCompletedDeltaSum float64
sectorsReadDeltaSum float64
@ -383,14 +383,9 @@ func do(configPath string) error {
return err
}
var legends []string
for _, elem := range cfg.RawData {
legends = append(legends, elem.Legend)
}
// KEYS, AVG-LATENCY-MS
plog.Printf("combining data to %q", cfg.AllLatencyByKey)
allToLatency := make(map[int64]map[string]float64)
allLatencyFrame := dataframe.New()
for _, elem := range cfg.RawData {
fr, err := dataframe.NewFromCSV([]string{"KEYS", "AVG-LATENCY-MS"}, elem.DataBenchmarkLatencyByKey)
if err != nil {
@ -400,65 +395,38 @@ func do(configPath string) error {
if err != nil {
return err
}
colKeys.UpdateHeader(makeHeader("KEYS", makeTag(elem.Legend)))
if err = allLatencyFrame.AddColumn(colKeys); err != nil {
return err
}
colLatency, err := fr.Column("AVG-LATENCY-MS")
if err != nil {
return err
}
for i := 0; i < colKeys.Count(); i++ {
vv1, err := colKeys.Value(i)
if err != nil {
return err
}
kn, _ := vv1.Int64()
vv2, err := colLatency.Value(i)
if err != nil {
return err
}
va, _ := vv2.Float64()
if _, ok := allToLatency[kn]; !ok {
allToLatency[kn] = make(map[string]float64)
}
allToLatency[kn][elem.Legend] = va
}
}
allLatencyKeys := make([]int64, 0, len(allToLatency))
for k := range allToLatency {
allLatencyKeys = append(allLatencyKeys, k)
}
sort.Sort(int64Slice(allLatencyKeys))
cl1 := dataframe.NewColumn("KEYS")
clCols := make([]dataframe.Column, len(legends))
for i, legend := range legends {
clCols[i] = dataframe.NewColumn(makeHeader("AVG-LATENCY-MS", legend))
}
for _, keyNum := range allLatencyKeys {
cl1.PushBack(dataframe.NewStringValue(keyNum))
for i, legend := range legends {
var latV float64
if v, ok := allToLatency[keyNum][legend]; ok {
latV = v
}
clCols[i].PushBack(dataframe.NewStringValue(fmt.Sprintf("%f", latV)))
}
}
allLatencyFrame := dataframe.New()
if err := allLatencyFrame.AddColumn(cl1); err != nil {
return err
}
for _, col := range clCols {
if err := allLatencyFrame.AddColumn(col); err != nil {
colLatency.UpdateHeader(makeHeader("AVG-LATENCY-MS", makeTag(elem.Legend)))
if err = allLatencyFrame.AddColumn(colLatency); err != nil {
return err
}
}
if err := allLatencyFrame.CSV(cfg.AllLatencyByKey); err != nil {
return err
}
allLatencyFrameCfg := PlotConfig{
Column: "AVG-LATENCY-MS",
XAxis: "Keys",
YAxis: "Latency(millisecond)",
OutputPathList: make([]string, len(cfg.PlotList[0].OutputPathList)),
}
allLatencyFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY.svg")
allLatencyFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-LATENCY-MS-BY-KEY.png")
if err = all.draw(allLatencyFrameCfg, allLatencyFrame.Columns()...); err != nil {
return err
}
// KEYS, AVG-VMRSS-MB
plog.Printf("combining data to %q", cfg.AllMemoryByKey)
allToMemory := make(map[int64]map[string]float64)
allMemoryFrame := dataframe.New()
for _, elem := range cfg.RawData {
fr, err := dataframe.NewFromCSV([]string{"KEYS", "AVG-VMRSS-MB"}, elem.DataBenchmarkMemoryByKey)
if err != nil {
@ -468,61 +436,34 @@ func do(configPath string) error {
if err != nil {
return err
}
colKeys.UpdateHeader(makeHeader("KEYS", makeTag(elem.Legend)))
if err = allMemoryFrame.AddColumn(colKeys); err != nil {
return err
}
colMem, err := fr.Column("AVG-VMRSS-MB")
if err != nil {
return err
}
for i := 0; i < colKeys.Count(); i++ {
vv1, err := colKeys.Value(i)
if err != nil {
return err
}
kn, _ := vv1.Int64()
vv2, err := colMem.Value(i)
if err != nil {
return err
}
va, _ := vv2.Float64()
if _, ok := allToMemory[kn]; !ok {
allToMemory[kn] = make(map[string]float64)
}
allToMemory[kn][elem.Legend] = va
}
}
allMemoryKeys := make([]int64, 0, len(allToMemory))
for k := range allToMemory {
allMemoryKeys = append(allMemoryKeys, k)
}
sort.Sort(int64Slice(allMemoryKeys))
cm1 := dataframe.NewColumn("KEYS")
cmCols := make([]dataframe.Column, len(legends))
for i, legend := range legends {
cmCols[i] = dataframe.NewColumn(makeHeader("AVG-VMRSS-MB", legend))
}
for _, keyNum := range allMemoryKeys {
cm1.PushBack(dataframe.NewStringValue(keyNum))
for i, legend := range legends {
var memoryV float64
if v, ok := allToMemory[keyNum][legend]; ok {
memoryV = v
}
cmCols[i].PushBack(dataframe.NewStringValue(fmt.Sprintf("%.2f", memoryV)))
}
}
allMemoryFrame := dataframe.New()
if err := allMemoryFrame.AddColumn(cm1); err != nil {
return err
}
for _, col := range cmCols {
if err := allMemoryFrame.AddColumn(col); err != nil {
colMem.UpdateHeader(makeHeader("AVG-LATENCY-MS", makeTag(elem.Legend)))
if err = allMemoryFrame.AddColumn(colMem); err != nil {
return err
}
}
if err := allMemoryFrame.CSV(cfg.AllMemoryByKey); err != nil {
return err
}
allMemoryFrameCfg := PlotConfig{
Column: "AVG-VMRSS-MB",
XAxis: "Keys",
YAxis: "Memory(MB)",
OutputPathList: make([]string, len(cfg.PlotList[0].OutputPathList)),
}
allLatencyFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY.svg")
allLatencyFrameCfg.OutputPathList[0] = filepath.Join(filepath.Dir(cfg.PlotList[0].OutputPathList[0]), "AVG-VMRSS-MB-BY-KEY.png")
if err = all.draw(allMemoryFrameCfg, allMemoryFrame.Columns()...); err != nil {
return err
}
plog.Println("combining data for plotting")
for _, plotConfig := range cfg.PlotList {
@ -661,9 +602,3 @@ func changeExtToTxt(fpath string) string {
ext := filepath.Ext(fpath)
return strings.Replace(fpath, ext, ".txt", -1)
}
type int64Slice []int64
func (a int64Slice) Len() int { return len(a) }
func (a int64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a int64Slice) Less(i, j int) bool { return a[i] < a[j] }

View File

@ -8,10 +8,10 @@ total_requests: 1000000
raw_data:
- legend: etcd v3.1 (Go 1.7.4)
output_path: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/etcd-v3.1-go1.7.4-data-latency-distribution-summary.csv
@ -21,10 +21,10 @@ raw_data:
- legend: Zookeeper r3.4.9 (Java 8)
output_path: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/zookeeper-r3.4.9-java8-data-latency-distribution-summary.csv
@ -34,10 +34,10 @@ raw_data:
- legend: Consul v0.7.3 (Go 1.7.4)
output_path: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/01-write-1M-keys-client-variable/consul-v0.7.3-go1.7.4-data-latency-distribution-summary.csv

View File

@ -8,10 +8,10 @@ total_requests: 1000000
raw_data:
- legend: etcd v3.1 (Go 1.7.4)
output_path: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/etcd-v3.1-go1.7.4-data-latency-distribution-summary.csv
@ -21,10 +21,10 @@ raw_data:
- legend: Zookeeper r3.4.9 (Java 8)
output_path: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/zookeeper-r3.4.9-java8-data-latency-distribution-summary.csv
@ -34,10 +34,10 @@ raw_data:
- legend: Consul v0.7.3 (Go 1.7.4)
output_path: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/02-write-1M-keys-best-throughput/consul-v0.7.3-go1.7.4-data-latency-distribution-summary.csv

View File

@ -8,10 +8,10 @@ total_requests: 1000000
raw_data:
- legend: etcd v3.1 (Go 1.7.4)
output_path: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/etcd-v3.1-go1.7.4-data-latency-distribution-summary.csv
@ -21,10 +21,10 @@ raw_data:
- legend: Zookeeper r3.4.9 (Java 8)
output_path: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/zookeeper-r3.4.9-java8-data-latency-distribution-summary.csv
@ -34,10 +34,10 @@ raw_data:
- legend: Consul v0.7.3 (Go 1.7.4)
output_path: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/03-write-1M-keys-1000-client/consul-v0.7.3-go1.7.4-data-latency-distribution-summary.csv

View File

@ -8,10 +8,10 @@ total_requests: ????
raw_data:
- legend: etcd v3.1 (Go 1.7.4)
output_path: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/etcd-v3.1-go1.7.4-data-latency-distribution-summary.csv
@ -21,10 +21,10 @@ raw_data:
- legend: Zookeeper r3.4.9 (Java 8)
output_path: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/zookeeper-r3.4.9-java8-data-latency-distribution-summary.csv
@ -34,10 +34,10 @@ raw_data:
- legend: Consul v0.7.3 (Go 1.7.4)
output_path: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-aggregated.csv
data_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-1-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-2-system-metrics.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-3-system-metrics.csv
data_interpolated_system_metrics_paths:
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-1-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-2-system-metrics-interpolated.csv
- 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-3-system-metrics-interpolated.csv
data_size_summary: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-data-size-summary.csv
data_benchmark_latency_percentile: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-data-latency-distribution-percentile.csv
data_benchmark_latency_summary: 2017Q1-02-etcd-zookeeper-consul/04-write-too-many-keys/consul-v0.7.3-go1.7.4-data-latency-distribution-summary.csv

View File

@ -17,7 +17,6 @@ package control
import (
"fmt"
"math"
"sort"
"strings"
"sync"
"time"
@ -292,10 +291,7 @@ func saveDataLatencyDistributionAll(cfg Config, st report.Stats) {
}
func saveDataLatencyThroughputTimeseries(cfg Config, st report.Stats, tsToClientN map[int64]int) {
// TODO: UNIX-TS from pkg/report data is time.Time.Unix
// UNIX-TS from psn.CSV data is time.Time.UnixNano
// we need some kind of way to combine those with matching timestamps
c1 := dataframe.NewColumn("UNIX-TS")
c1 := dataframe.NewColumn("UNIX-SECOND")
c2 := dataframe.NewColumn("CONTROL-CLIENT-NUM")
c3 := dataframe.NewColumn("AVG-LATENCY-MS")
c4 := dataframe.NewColumn("AVG-THROUGHPUT")
@ -370,74 +366,3 @@ func saveAllStats(cfg Config, stats report.Stats, tsToClientN map[int64]int) {
// cfg.DataLatencyThroughputTimeseries
saveDataLatencyThroughputTimeseries(cfg, stats, tsToClientN)
}
// processTimeSeries sorts all data points by its timestamp.
// And then aggregate by the cumulative throughput,
// in order to map the number of keys to the average latency.
//
// type DataPoint struct {
// Timestamp int64
// AvgLatency time.Duration
// ThroughPut int64
// }
//
// If unis is 1000 and the average throughput per second is 30,000
// and its average latency is 10ms, it will have 30 data points with
// latency 10ms.
func processTimeSeries(tss report.TimeSeries, unit int64, totalRequests int) keyNumToAvgLatencys {
sort.Sort(tss)
cumulKeyN := int64(0)
maxKey := int64(0)
rm := make(map[int64]time.Duration)
// this data is aggregated by second
// and we want to map number of keys to latency
// so the range is the key
// and the value is the cumulative throughput
for _, ts := range tss {
cumulKeyN += ts.ThroughPut
if cumulKeyN < unit {
// not enough data points yet
continue
}
lat := ts.AvgLatency
// cumulKeyN >= unit
for cumulKeyN > maxKey {
maxKey += unit
rm[maxKey] = lat
}
}
// fill-in empty rows
for i := maxKey; i < int64(totalRequests); i += unit {
if _, ok := rm[i]; !ok {
rm[i] = time.Duration(0)
}
}
if _, ok := rm[int64(totalRequests)]; !ok {
rm[int64(totalRequests)] = time.Duration(0)
}
kss := []keyNumToAvgLatency{}
for k, v := range rm {
kss = append(kss, keyNumToAvgLatency{keyNum: k, avgLat: v})
}
sort.Sort(keyNumToAvgLatencys(kss))
return kss
}
type keyNumToAvgLatency struct {
keyNum int64
avgLat time.Duration
}
type keyNumToAvgLatencys []keyNumToAvgLatency
func (t keyNumToAvgLatencys) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t keyNumToAvgLatencys) Len() int { return len(t) }
func (t keyNumToAvgLatencys) Less(i, j int) bool { return t[i].keyNum < t[j].keyNum }

View File

@ -0,0 +1,93 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package control
import (
"sort"
"time"
"github.com/coreos/etcd/pkg/report"
)
// processTimeSeries sorts all data points by timestamp, then aggregates
// them by cumulative throughput, mapping the number of keys written so
// far (in multiples of unit) to the average latency observed at that
// point.
//
// Each element of tss carries:
//
//	type DataPoint struct {
//		Timestamp  int64
//		AvgLatency time.Duration
//		ThroughPut int64
//	}
//
// If unit is 1000 and the average throughput per second is 30,000
// and its average latency is 10ms, it will have 30 data points with
// latency 10ms.
func processTimeSeries(tss report.TimeSeries, unit int64, totalRequests int) keyNumToAvgLatencys {
	sort.Sort(tss)
	cumulKeyN := int64(0) // cumulative number of keys seen so far
	maxKey := int64(0)    // highest key-count bucket filled so far
	rm := make(map[int64]time.Duration)
	// this data is aggregated by second
	// and we want to map number of keys to latency
	// so the range is the key
	// and the value is the cumulative throughput
	for _, ts := range tss {
		cumulKeyN += ts.ThroughPut
		if cumulKeyN < unit {
			// not enough data points yet
			continue
		}
		lat := ts.AvgLatency
		// cumulKeyN >= unit; fill every unit-sized bucket up to the
		// current cumulative total with this second's average latency
		for cumulKeyN > maxKey {
			maxKey += unit
			rm[maxKey] = lat
		}
	}
	// fill-in empty rows so the result always extends to totalRequests,
	// padding unreached buckets with zero latency
	for i := maxKey; i < int64(totalRequests); i += unit {
		if _, ok := rm[i]; !ok {
			rm[i] = time.Duration(0)
		}
	}
	if _, ok := rm[int64(totalRequests)]; !ok {
		rm[int64(totalRequests)] = time.Duration(0)
	}
	// flatten the map and sort by key count for deterministic output
	kss := []keyNumToAvgLatency{}
	for k, v := range rm {
		kss = append(kss, keyNumToAvgLatency{keyNum: k, avgLat: v})
	}
	sort.Sort(keyNumToAvgLatencys(kss))
	return kss
}
// keyNumToAvgLatency pairs a cumulative key count with the average
// latency observed once that many keys had been written.
type keyNumToAvgLatency struct {
	keyNum int64
	avgLat time.Duration
}

// keyNumToAvgLatencys implements sort.Interface, ordering entries by
// ascending key count.
type keyNumToAvgLatencys []keyNumToAvgLatency

func (t keyNumToAvgLatencys) Len() int           { return len(t) }
func (t keyNumToAvgLatencys) Less(i, j int) bool { return t[i].keyNum < t[j].keyNum }
func (t keyNumToAvgLatencys) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }

21
control/util_sort.go Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package control
// int64Slice implements sort.Interface for a slice of int64,
// ordering values ascending.
type int64Slice []int64

func (s int64Slice) Len() int           { return len(s) }
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

18
glide.lock generated
View File

@ -1,5 +1,5 @@
hash: a742783c5966644247e12ddcda9629965818fdd68e659a0866f279a88d49290f
updated: 2017-01-27T15:23:50.941693612-08:00
hash: 37c2e1c77ebff89ce9c3c18607c70a384f09e122d335d345a8794a6c4eb1243d
updated: 2017-02-03T00:30:14.444883937-08:00
imports:
- name: bitbucket.org/zombiezen/gopdf
version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
@ -95,19 +95,19 @@ imports:
- runtime/internal
- utilities
- name: github.com/gyuho/dataframe
version: e715fd4225590f6558961c316924e56a8a2647b0
version: f9fe6fd9537b000e95362aefbcc0250679e1549b
- name: github.com/gyuho/psn
version: 5a0ffadd2ecaca939e373bf01aa45ac66c8fa2c0
version: ab7b49dddbe2aa6039443168555ef205c5de7145
subpackages:
- schema
- name: github.com/hashicorp/consul
version: 8d57727ff0d113a97c89a992e4681680f95cbf03
version: 3da73be55c82a7f88f1dfd3ec16d267970ac8ff0
subpackages:
- api
- name: github.com/hashicorp/go-cleanhttp
version: ad28ea4487f05916463e2423a55166280e8254b5
- name: github.com/hashicorp/serf
version: 44157f9dfdbcf6d9e20fcfd52cfee348dbc467ee
version: 34e94dbd8faa991710b442c22ad6ad37c8b44c3b
subpackages:
- coordinate
- name: github.com/inconshreveable/mousetrap
@ -118,7 +118,7 @@ imports:
- draw2dbase
- draw2dimg
- name: github.com/mattn/go-runewidth
version: 737072b4e32b7a5018b4a7125da8d12de90e8045
version: 14207d285c6c197daabb5c9793d63e7af9ab2d50
- name: github.com/matttproud/golang_protobuf_extensions
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
subpackages:
@ -140,7 +140,7 @@ imports:
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60
version: 1878d9fbb537119d24b21ca07effd591627cd160
- name: github.com/samuel/go-zookeeper
version: 1d7be4effb13d2d908342d349d71a284a7542693
subpackages:
@ -154,7 +154,7 @@ imports:
subpackages:
- codec
- name: golang.org/x/image
version: 83686c547965220f8b5d75e83ddc67d73420a89f
version: 55ae12acc992ce6034ae993e4212fd6f8057b1c6
subpackages:
- draw
- font

View File

@ -52,11 +52,11 @@ import:
- vg/vgpdf
- vg/vgsvg
- package: github.com/gyuho/dataframe
version: e715fd4225590f6558961c316924e56a8a2647b0
version: f9fe6fd9537b000e95362aefbcc0250679e1549b
- package: github.com/gyuho/psn
version: 5a0ffadd2ecaca939e373bf01aa45ac66c8fa2c0
version: ab7b49dddbe2aa6039443168555ef205c5de7145
- package: github.com/hashicorp/consul
version: 8d57727ff0d113a97c89a992e4681680f95cbf03
version: 3da73be55c82a7f88f1dfd3ec16d267970ac8ff0
subpackages:
- api
- package: github.com/samuel/go-zookeeper

View File

@ -0,0 +1,82 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netutil
import (
"fmt"
"os/exec"
)
// DropPort drops all tcp packets that are received from the given port and sent to the given port.
// It installs two iptables DROP rules — one on the OUTPUT chain and one
// on the INPUT chain — both matching the destination port. It shells out
// via /bin/sh and requires passwordless sudo to succeed.
func DropPort(port int) error {
	cmdStr := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port)
	if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
		return err
	}
	// NOTE(review): the INPUT rule also matches --destination-port, not
	// --source-port; this mirrors the upstream etcd helper — confirm intended.
	cmdStr = fmt.Sprintf("sudo iptables -A INPUT -p tcp --destination-port %d -j DROP", port)
	_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
	return err
}
// RecoverPort stops dropping tcp packets at given port.
// It deletes (-D) the OUTPUT and INPUT DROP rules previously installed
// by DropPort; it requires sudo and fails if the rules do not exist.
func RecoverPort(port int) error {
	cmdStr := fmt.Sprintf("sudo iptables -D OUTPUT -p tcp --destination-port %d -j DROP", port)
	if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
		return err
	}
	cmdStr = fmt.Sprintf("sudo iptables -D INPUT -p tcp --destination-port %d -j DROP", port)
	_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
	return err
}
// SetLatency adds latency in millisecond scale with random variations.
// It applies a tc/netem delay of ms milliseconds (± rv milliseconds,
// normal distribution) to every default network interface returned by
// GetDefaultInterfaces. Requires sudo.
func SetLatency(ms, rv int) error {
	ifces, err := GetDefaultInterfaces()
	if err != nil {
		return err
	}
	// variation must not exceed the base delay; clamp it to 1ms
	// NOTE(review): clamping to 1 rather than ms looks intentional but
	// is worth confirming with the author.
	if rv > ms {
		rv = 1
	}
	for ifce := range ifces {
		cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
		_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
		if err != nil {
			// the rule has already been added. Overwrite it.
			cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
			_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// RemoveLatency resets latency configurations.
func RemoveLatency() error {
	ifcs, err := GetDefaultInterfaces()
	if err != nil {
		return err
	}
	// Tear down the netem qdisc on every default interface.
	for dev := range ifcs {
		cmd := fmt.Sprintf("sudo tc qdisc del dev %s root netem", dev)
		if _, derr := exec.Command("/bin/sh", "-c", cmd).Output(); derr != nil {
			return derr
		}
	}
	return nil
}

View File

@ -0,0 +1,25 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package netutil
// DropPort is a no-op on platforms without iptables support.
func DropPort(port int) error { return nil }
// RecoverPort is a no-op on platforms without iptables support.
func RecoverPort(port int) error { return nil }
// SetLatency is a no-op on platforms without tc/netem support.
func SetLatency(ms, rv int) error { return nil }
// RemoveLatency is a no-op on platforms without tc/netem support.
func RemoveLatency() error { return nil }

143
pkg/netutil/netutil.go Normal file
View File

@ -0,0 +1,143 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package netutil implements network-related utility functions.
package netutil
import (
"net"
"net/url"
"reflect"
"sort"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/pkg/capnslog"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil")
// indirection for testing
resolveTCPAddr = net.ResolveTCPAddr
)
const retryInterval = time.Second
// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
// resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames
// are resolved.
func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) {
	resolved := make([][]url.URL, 0, len(urls))
	for _, group := range urls {
		// Deep-copy the group so callers' URLs are never mutated.
		copied := make([]url.URL, len(group))
		for i := range group {
			parsed, perr := url.Parse(group[i].String())
			if perr != nil {
				return nil, perr
			}
			copied[i] = *parsed
		}
		// Rewrite each host with its resolved TCP address, when one exists.
		for i := range copied {
			host, rerr := resolveURL(ctx, copied[i])
			if rerr != nil {
				return nil, rerr
			}
			if host != "" {
				copied[i].Host = host
			}
		}
		resolved = append(resolved, copied)
	}
	return resolved, nil
}
// resolveURL resolves the hostname in u.Host to a "host:port" TCP address,
// retrying every retryInterval until the context is canceled. It returns
// "" (meaning no rewrite is needed) for "localhost" and literal IPs.
func resolveURL(ctx context.Context, u url.URL) (string, error) {
	for ctx.Err() == nil {
		host, _, err := net.SplitHostPort(u.Host)
		if err != nil {
			plog.Errorf("could not parse url %s during tcp resolving", u.Host)
			return "", err
		}
		// "localhost" and IP literals need no DNS resolution.
		if host == "localhost" || net.ParseIP(host) != nil {
			return "", nil
		}
		tcpAddr, err := resolveTCPAddr("tcp", u.Host)
		if err == nil {
			plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
			return tcpAddr.String(), nil
		}
		plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval)
		select {
		case <-ctx.Done():
			// Canceled mid-retry: surface the last resolution error,
			// not ctx.Err().
			plog.Errorf("could not resolve host %s", u.Host)
			return "", err
		case <-time.After(retryInterval):
		}
	}
	return "", ctx.Err()
}
// urlsEqual checks equality of url.URLS between two arrays.
// This check pass even if an URL is in hostname and opposite is in IP address.
func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) bool {
	if len(a) != len(b) {
		return false
	}
	resolved, err := resolveTCPAddrs(ctx, [][]url.URL{a, b})
	if err != nil {
		return false
	}
	left, right := resolved[0], resolved[1]
	// Sort both sides so equality is order-independent.
	sort.Sort(types.URLs(left))
	sort.Sort(types.URLs(right))
	for i := range left {
		if !reflect.DeepEqual(left[i], right[i]) {
			return false
		}
	}
	return true
}
// URLStringsEqual reports whether the URL strings in a and b describe the
// same set of endpoints, treating a hostname and the IP it resolves to as
// the same host. Malformed input makes the comparison false.
func URLStringsEqual(ctx context.Context, a []string, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	urlsA, ok := parseURLStrings(a)
	if !ok {
		return false
	}
	urlsB, ok := parseURLStrings(b)
	if !ok {
		return false
	}
	return urlsEqual(ctx, urlsA, urlsB)
}

// parseURLStrings parses each string into a url.URL; ok is false on the
// first malformed entry.
func parseURLStrings(ss []string) (us []url.URL, ok bool) {
	us = make([]url.URL, 0, len(ss))
	for _, s := range ss {
		u, err := url.Parse(s)
		if err != nil {
			return nil, false
		}
		us = append(us, *u)
	}
	return us, true
}
func IsNetworkTimeoutError(err error) bool {
nerr, ok := err.(net.Error)
return ok && nerr.Timeout()
}

263
pkg/netutil/netutil_test.go Normal file
View File

@ -0,0 +1,263 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netutil
import (
"errors"
"net"
"net/url"
"reflect"
"strconv"
"testing"
"time"
"golang.org/x/net/context"
)
// TestResolveTCPAddrs exercises resolveTCPAddrs with a stubbed
// resolveTCPAddr so no real DNS lookups are performed.
func TestResolveTCPAddrs(t *testing.T) {
	// restore the real resolver when the test finishes
	defer func() { resolveTCPAddr = net.ResolveTCPAddr }()
	tests := []struct {
		urls     [][]url.URL
		expected [][]url.URL
		hostMap  map[string]string
		hasError bool
	}{
		{
			// IP literals pass through unchanged.
			urls: [][]url.URL{
				{
					{Scheme: "http", Host: "127.0.0.1:4001"},
					{Scheme: "http", Host: "127.0.0.1:2379"},
				},
				{
					{Scheme: "http", Host: "127.0.0.1:7001"},
					{Scheme: "http", Host: "127.0.0.1:2380"},
				},
			},
			expected: [][]url.URL{
				{
					{Scheme: "http", Host: "127.0.0.1:4001"},
					{Scheme: "http", Host: "127.0.0.1:2379"},
				},
				{
					{Scheme: "http", Host: "127.0.0.1:7001"},
					{Scheme: "http", Host: "127.0.0.1:2380"},
				},
			},
		},
		{
			// Hostnames are rewritten to the IP in hostMap.
			urls: [][]url.URL{
				{
					{Scheme: "http", Host: "infra0.example.com:4001"},
					{Scheme: "http", Host: "infra0.example.com:2379"},
				},
				{
					{Scheme: "http", Host: "infra0.example.com:7001"},
					{Scheme: "http", Host: "infra0.example.com:2380"},
				},
			},
			expected: [][]url.URL{
				{
					{Scheme: "http", Host: "10.0.1.10:4001"},
					{Scheme: "http", Host: "10.0.1.10:2379"},
				},
				{
					{Scheme: "http", Host: "10.0.1.10:7001"},
					{Scheme: "http", Host: "10.0.1.10:2380"},
				},
			},
			hostMap: map[string]string{
				"infra0.example.com": "10.0.1.10",
			},
			hasError: false,
		},
		{
			// An unresolvable hostname (empty mapping) must error.
			urls: [][]url.URL{
				{
					{Scheme: "http", Host: "infra0.example.com:4001"},
					{Scheme: "http", Host: "infra0.example.com:2379"},
				},
				{
					{Scheme: "http", Host: "infra0.example.com:7001"},
					{Scheme: "http", Host: "infra0.example.com:2380"},
				},
			},
			hostMap: map[string]string{
				"infra0.example.com": "",
			},
			hasError: true,
		},
		{
			// Malformed Host values (embedded scheme) must error.
			urls: [][]url.URL{
				{
					{Scheme: "http", Host: "ssh://infra0.example.com:4001"},
					{Scheme: "http", Host: "ssh://infra0.example.com:2379"},
				},
				{
					{Scheme: "http", Host: "ssh://infra0.example.com:7001"},
					{Scheme: "http", Host: "ssh://infra0.example.com:2380"},
				},
			},
			hasError: true,
		},
	}
	for _, tt := range tests {
		// Stub resolver backed by tt.hostMap.
		resolveTCPAddr = func(network, addr string) (*net.TCPAddr, error) {
			host, port, err := net.SplitHostPort(addr)
			if err != nil {
				return nil, err
			}
			if tt.hostMap[host] == "" {
				return nil, errors.New("cannot resolve host.")
			}
			i, err := strconv.Atoi(port)
			if err != nil {
				return nil, err
			}
			return &net.TCPAddr{IP: net.ParseIP(tt.hostMap[host]), Port: i, Zone: ""}, nil
		}
		ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
		urls, err := resolveTCPAddrs(ctx, tt.urls)
		cancel()
		if tt.hasError {
			if err == nil {
				t.Errorf("expected error")
			}
			continue
		}
		if !reflect.DeepEqual(urls, tt.expected) {
			t.Errorf("expected: %v, got %v", tt.expected, urls)
		}
	}
}
// TestURLsEqual exercises urlsEqual with a stubbed resolveTCPAddr backed
// by a fixed hostname->IP table, so no real DNS lookups are performed.
func TestURLsEqual(t *testing.T) {
	// restore the real resolver when the test finishes
	defer func() { resolveTCPAddr = net.ResolveTCPAddr }()
	hostm := map[string]string{
		"example.com": "10.0.10.1",
		"first.com":   "10.0.11.1",
		"second.com":  "10.0.11.2",
	}
	resolveTCPAddr = func(network, addr string) (*net.TCPAddr, error) {
		host, port, err := net.SplitHostPort(addr)
		// Fix: the original stub silently ignored the SplitHostPort error.
		if err != nil {
			return nil, err
		}
		if _, ok := hostm[host]; !ok {
			return nil, errors.New("cannot resolve host.")
		}
		i, err := strconv.Atoi(port)
		if err != nil {
			return nil, err
		}
		return &net.TCPAddr{IP: net.ParseIP(hostm[host]), Port: i, Zone: ""}, nil
	}
	tests := []struct {
		a      []url.URL
		b      []url.URL
		expect bool
	}{
		{
			a:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
			b:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
			expect: true,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "example.com:2379"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}},
			expect: true,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: true,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: true,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: true,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
			b:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "example.com:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "example.com:2379"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
			expect: false,
		},
		{
			a:      []url.URL{{Scheme: "http", Host: "first.com:2379"}, {Scheme: "http", Host: "second.com:2380"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.11.1:2379"}, {Scheme: "http", Host: "10.0.11.2:2380"}},
			expect: true,
		},
		{
			// Order-independence: hostnames given in a different order
			// than their resolved IPs still compare equal.
			a:      []url.URL{{Scheme: "http", Host: "second.com:2380"}, {Scheme: "http", Host: "first.com:2379"}},
			b:      []url.URL{{Scheme: "http", Host: "10.0.11.1:2379"}, {Scheme: "http", Host: "10.0.11.2:2380"}},
			expect: true,
		},
	}
	for _, test := range tests {
		result := urlsEqual(context.TODO(), test.a, test.b)
		if result != test.expect {
			t.Errorf("a:%v b:%v, expected %v but %v", test.a, test.b, test.expect, result)
		}
	}
}
// TestURLStringsEqual checks that identical URL strings compare equal.
func TestURLStringsEqual(t *testing.T) {
	endpoints := []string{"http://127.0.0.1:8080"}
	result := URLStringsEqual(context.TODO(), endpoints, endpoints)
	if !result {
		t.Errorf("unexpected result %v", result)
	}
}

33
pkg/netutil/routes.go Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package netutil
import (
"fmt"
"runtime"
)
// GetDefaultHost fetches a resolvable name that corresponds
// to the machine's default routable interface.
// It always fails on non-Linux platforms.
func GetDefaultHost() (string, error) {
	return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
}
// GetDefaultInterfaces fetches the device names of the default routable
// interfaces, keyed by interface name. It always fails on non-Linux
// platforms.
//
// Fixes two defects in the original: returning "" for a map[string]uint8
// result did not compile, and the name GetDefaultInterface did not match
// the Linux implementation (GetDefaultInterfaces) that callers use.
func GetDefaultInterfaces() (map[string]uint8, error) {
	return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
}

228
pkg/netutil/routes_linux.go Normal file
View File

@ -0,0 +1,228 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package netutil
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"syscall"
"github.com/coreos/etcd/pkg/cpuutil"
)
var errNoDefaultRoute = fmt.Errorf("could not find default route")
var errNoDefaultHost = fmt.Errorf("could not find default host")
var errNoDefaultInterface = fmt.Errorf("could not find default interface")
// GetDefaultHost obtains the first IP address of machine from the routing table and returns the IP address as string.
// An IPv4 address is preferred to an IPv6 address for backward compatibility.
func GetDefaultHost() (string, error) {
	rmsgs, rerr := getDefaultRoutes()
	if rerr != nil {
		return "", rerr
	}
	// NOTE(review): rmsgs is a map keyed by address family, and Go map
	// iteration order is random — confirm how "IPv4 preferred" is actually
	// guaranteed here.
	for family, rmsg := range rmsgs {
		// Prefer the route's preferred-source (RTA_PREFSRC) address.
		host, oif, err := parsePREFSRC(rmsg)
		if err != nil {
			return "", err
		}
		if host != "" {
			return host, nil
		}
		// prefsrc not detected, fall back to getting address from iface
		ifmsg, ierr := getIfaceAddr(oif, family)
		if ierr != nil {
			return "", ierr
		}
		attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
		if aerr != nil {
			return "", aerr
		}
		for _, attr := range attrs {
			// search for RTA_DST because ipv6 doesn't have RTA_SRC
			if attr.Attr.Type == syscall.RTA_DST {
				return net.IP(attr.Value).String(), nil
			}
		}
	}
	return "", errNoDefaultHost
}
// getDefaultRoutes reads the kernel routing table over netlink and returns
// the default-route message for each address family, keyed by family
// (e.g. AF_INET, AF_INET6). It fails with errNoDefaultRoute when no
// default route exists.
func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) {
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
	if err != nil {
		return nil, err
	}
	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
	if msgErr != nil {
		return nil, msgErr
	}
	routes := make(map[uint8]*syscall.NetlinkMessage)
	rtmsg := syscall.RtMsg{}
	for _, m := range msgs {
		if m.Header.Type != syscall.RTM_NEWROUTE {
			continue
		}
		// Decode the fixed-size route header in host byte order.
		buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil {
			continue
		}
		if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN {
			// zero-length Dst_len implies default route
			// (copy m so the map does not alias the loop variable)
			msg := m
			routes[rtmsg.Family] = &msg
		}
	}
	if len(routes) > 0 {
		return routes, nil
	}
	return nil, errNoDefaultRoute
}
// getIfaceAddr returns the RTM_NEWADDR netlink message for the interface
// with index idx in the given address family. Used to get an address of
// an interface when the route carries no preferred source.
func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) {
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family))
	if err != nil {
		return nil, err
	}
	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
	if msgErr != nil {
		return nil, msgErr
	}
	ifaddrmsg := syscall.IfAddrmsg{}
	for _, m := range msgs {
		if m.Header.Type != syscall.RTM_NEWADDR {
			continue
		}
		// Decode the fixed-size address header in host byte order.
		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil {
			continue
		}
		if ifaddrmsg.Index == idx {
			return &m, nil
		}
	}
	return nil, fmt.Errorf("could not find address for interface index %v", idx)
}
// getIfaceLink returns the RTM_NEWLINK netlink message for the interface
// with index idx. Used to get the name of an interface.
func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) {
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC)
	if err != nil {
		return nil, err
	}
	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
	if msgErr != nil {
		return nil, msgErr
	}
	ifinfomsg := syscall.IfInfomsg{}
	for _, m := range msgs {
		if m.Header.Type != syscall.RTM_NEWLINK {
			continue
		}
		// Decode the fixed-size link header in host byte order.
		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg])
		if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil {
			continue
		}
		if ifinfomsg.Index == int32(idx) {
			return &m, nil
		}
	}
	return nil, fmt.Errorf("could not find link for interface index %v", idx)
}
// GetDefaultInterfaces gets names of interfaces and returns a map[interface]families.
// Each default route contributes its address family to the interface it
// uses, so an interface serving both IPv4 and IPv6 accumulates both.
func GetDefaultInterfaces() (map[string]uint8, error) {
	interfaces := make(map[string]uint8)
	rmsgs, rerr := getDefaultRoutes()
	if rerr != nil {
		return interfaces, rerr
	}
	for family, rmsg := range rmsgs {
		// Only the output-interface index (oif) is needed here.
		_, oif, err := parsePREFSRC(rmsg)
		if err != nil {
			return interfaces, err
		}
		ifmsg, ierr := getIfaceLink(oif)
		if ierr != nil {
			return interfaces, ierr
		}
		attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
		if aerr != nil {
			return interfaces, aerr
		}
		for _, attr := range attrs {
			if attr.Attr.Type == syscall.IFLA_IFNAME {
				// key is an interface name
				// (the attribute value carries a trailing NUL, trimmed here)
				// possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack
				interfaces[string(attr.Value[:len(attr.Value)-1])] += family
			}
		}
	}
	if len(interfaces) > 0 {
		return interfaces, nil
	}
	return interfaces, errNoDefaultInterface
}
// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
// host is "" when the route carries no RTA_PREFSRC attribute; a zero oif is
// treated as "no default route".
func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
	var attrs []syscall.NetlinkRouteAttr
	attrs, err = syscall.ParseNetlinkRouteAttr(m)
	if err != nil {
		return "", 0, err
	}
	for _, attr := range attrs {
		if attr.Attr.Type == syscall.RTA_PREFSRC {
			host = net.IP(attr.Value).String()
		}
		if attr.Attr.Type == syscall.RTA_OIF {
			oif = cpuutil.ByteOrder().Uint32(attr.Value)
		}
		// Stop early once both pieces have been found.
		if host != "" && oif != uint32(0) {
			break
		}
	}
	if oif == 0 {
		err = errNoDefaultRoute
	}
	return
}

View File

@ -0,0 +1,35 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package netutil
import "testing"
// TestGetDefaultInterface logs the default interfaces of the host machine.
func TestGetDefaultInterface(t *testing.T) {
	ifaces, ferr := GetDefaultInterfaces()
	if ferr != nil {
		t.Fatal(ferr)
	}
	t.Logf("default network interfaces: %+v\n", ifaces)
}
// TestGetDefaultHost logs the default routable IP of the host machine.
func TestGetDefaultHost(t *testing.T) {
	addr, herr := GetDefaultHost()
	if herr != nil {
		t.Fatal(herr)
	}
	t.Logf("default ip: %v", addr)
}

View File

@ -19,6 +19,9 @@ type EntryFilter struct {
LocalPort int64
RemotePort int64
// for ps
TopCommandPath string
// for Proc
DiskDevice string
NetworkInterface string
@ -75,6 +78,11 @@ func WithTCP6() FilterFunc {
return func(ft *EntryFilter) { ft.TCP6 = true }
}
// WithTopCommandPath configures 'top' command path.
func WithTopCommandPath(path string) FilterFunc {
return func(ft *EntryFilter) { ft.TopCommandPath = path }
}
// WithDiskDevice to filter entries by disk device.
func WithDiskDevice(name string) FilterFunc {
return func(ft *EntryFilter) { ft.DiskDevice = name }
@ -111,4 +119,8 @@ func (ft *EntryFilter) applyOpts(opts []FilterFunc) {
if ft.LocalPort > 0 && ft.RemotePort > 0 {
panic(fmt.Errorf("can't query by both local(%d) and remote(%d) ports", ft.LocalPort, ft.RemotePort))
}
if ft.TopCommandPath == "" {
ft.TopCommandPath = DefaultTopPath
}
}

View File

@ -1,6 +1,6 @@
package psn
// updated at 2017-01-08 13:43:03.746568457 -0800 PST
// updated at 2017-01-31 14:04:04.136777234 -0800 PST
// NetDev is '/proc/net/dev' in Linux.
// The dev pseudo-file contains network device status information.
@ -78,6 +78,57 @@ type NetTCP struct {
Inode string `column:"inode"`
}
// TopCommandRow represents a row in 'top' command output.
type TopCommandRow struct {
	// PID is pid of the process.
	PID int64 `column:"pid"`
	// USER is user name.
	USER string `column:"user"`
	// PR is priority.
	PR string `column:"pr"`
	// NI is nice value of the task.
	NI string `column:"ni"`
	// VIRT is total amount of virtual memory used by the task (in KiB).
	VIRT string `column:"virt"`
	// VIRTBytesN is VIRT converted to a byte count.
	VIRTBytesN uint64 `column:"virt_bytes_n"`
	// VIRTParsedBytes is VIRT rendered as a human-readable byte string.
	VIRTParsedBytes string `column:"virt_parsed_bytes"`
	// RES is non-swapped physical memory a task is using (in KiB).
	RES string `column:"res"`
	// RESBytesN is RES converted to a byte count.
	RESBytesN uint64 `column:"res_bytes_n"`
	// RESParsedBytes is RES rendered as a human-readable byte string.
	RESParsedBytes string `column:"res_parsed_bytes"`
	// SHR is amount of shared memory available to a task, not all of which is typically resident (in KiB).
	SHR string `column:"shr"`
	// SHRBytesN is SHR converted to a byte count.
	SHRBytesN uint64 `column:"shr_bytes_n"`
	// SHRParsedBytes is SHR rendered as a human-readable byte string.
	SHRParsedBytes string `column:"shr_parsed_bytes"`
	// S is process status.
	S string `column:"s"`
	// SParsedStatus is S expanded to a descriptive status string.
	SParsedStatus string `column:"s_parsed_status"`
	// CPUPercent is %CPU.
	CPUPercent float64 `column:"cpupercent"`
	// MEMPercent is %MEM.
	MEMPercent float64 `column:"mempercent"`
	// TIME is CPU time (TIME+).
	TIME string `column:"time"`
	// COMMAND is command.
	COMMAND string `column:"command"`
}
// LoadAvg is '/proc/loadavg' in Linux.
type LoadAvg struct {
	// LoadAvg1Minute is the system load average over the last 1 minute.
	LoadAvg1Minute float64 `column:"load_avg_1_minute"`
	// LoadAvg5Minute is the system load average over the last 5 minutes.
	LoadAvg5Minute float64 `column:"load_avg_5_minute"`
	// LoadAvg15Minute is the system load average over the last 15 minutes.
	LoadAvg15Minute float64 `column:"load_avg_15_minute"`
	// RunnableKernelSchedulingEntities is number of currently runnable kernel scheduling entities (processes, threads).
	RunnableKernelSchedulingEntities int64 `column:"runnable_kernel_scheduling_entities"`
	// CurrentKernelSchedulingEntities is number of kernel scheduling entities that currently exist on the system.
	CurrentKernelSchedulingEntities int64 `column:"current_kernel_scheduling_entities"`
	// Pid is PID of the process that was most recently created on the system.
	Pid int64 `column:"pid"`
}
// Uptime is '/proc/uptime' in Linux.
type Uptime struct {
// UptimeTotal is total uptime in seconds.
@ -264,8 +315,7 @@ type Stat struct {
// EnvEnd is address below which program environment is placed.
EnvEnd uint64 `column:"env_end"`
// ExitCode is thread's exit status in the form reported by waitpid(2).
ExitCode int64 `column:"exit_code"`
CpuUsage float64 `column:"cpu_usage"`
ExitCode int64 `column:"exit_code"`
}
// Status is '/proc/$PID/status' in Linux.

View File

@ -34,7 +34,7 @@ type PSEntry struct {
VMSizeNum uint64
}
const maxConcurrentProcStat = 32
const maxConcurrentProcFDLimit = 32
// GetPS finds all PSEntry by given filter.
func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
@ -61,13 +61,9 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
// applyOpts already panic when ft.ProgramMatchFunc != nil && ft.PID > 0
}
up, err := GetProcUptime()
if err != nil {
return nil, err
}
// can't filter both by program and by PID
if len(pids) == 0 {
// find PIDs by Program
// list all PIDs, or later to match by Program
if pids, err = ListPIDs(); err != nil {
return
}
@ -75,10 +71,33 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
ft.ProgramMatchFunc = func(string) bool { return true }
}
var topRows []TopCommandRow
if len(pids) == 1 {
topRows, err = GetTop(ft.TopCommandPath, pids[0])
if err != nil {
return
}
} else {
topRows, err = GetTop(ft.TopCommandPath, 0)
if err != nil {
return
}
}
topM := make(map[int64]TopCommandRow, len(topRows))
for _, row := range topRows {
topM[row.PID] = row
}
for _, pid := range pids {
if _, ok := topM[pid]; !ok {
topM[pid] = TopCommandRow{PID: pid}
log.Printf("PID %d is not found at 'top' command output", pid)
}
}
var pmu sync.RWMutex
var wg sync.WaitGroup
wg.Add(len(pids))
limitc := make(chan struct{}, maxConcurrentProcStat)
limitc := make(chan struct{}, maxConcurrentProcFDLimit)
for _, pid := range pids {
go func(pid int64) {
defer func() {
@ -88,12 +107,8 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
limitc <- struct{}{}
stat, err := GetProcStatByPID(pid, up)
if err != nil {
log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
topRow := topM[pid]
if !ft.ProgramMatchFunc(topRow.COMMAND) {
return
}
@ -104,7 +119,7 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
return
}
ent, err := getPSEntry(pid, stat)
ent, err := getPSEntry(pid, topRow)
if err != nil {
log.Printf("getPSEntry error %v for PID %d", err, pid)
return
@ -123,20 +138,20 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
return
}
func getPSEntry(pid int64, stat Stat) (PSEntry, error) {
func getPSEntry(pid int64, topRow TopCommandRow) (PSEntry, error) {
status, err := GetProcStatusByPID(pid)
if err != nil {
return PSEntry{}, err
}
entry := PSEntry{
Program: stat.Comm,
State: stat.StateParsedStatus,
Program: status.Name,
State: status.StateParsedStatus,
PID: stat.Pid,
PPID: stat.Ppid,
PID: status.Pid,
PPID: status.PPid,
CPU: fmt.Sprintf("%3.2f %%", stat.CpuUsage),
CPU: fmt.Sprintf("%3.2f %%", topRow.CPUPercent),
VMRSS: status.VmRSSParsedBytes,
VMSize: status.VmSizeParsedBytes,
@ -146,7 +161,7 @@ func getPSEntry(pid int64, stat Stat) (PSEntry, error) {
VoluntaryCtxtSwitches: status.VoluntaryCtxtSwitches,
NonvoluntaryCtxtSwitches: status.NonvoluntaryCtxtSwitches,
CPUNum: stat.CpuUsage,
CPUNum: topRow.CPUPercent,
VMRSSNum: status.VmRSSBytesN,
VMSizeNum: status.VmSizeBytesN,
}

View File

@ -54,139 +54,69 @@ func GetSS(opts ...FilterFunc) (sss []SSEntry, err error) {
// applyOpts already panic when ft.ProgramMatchFunc != nil && ft.PID > 0
}
var pmu sync.RWMutex
var wg sync.WaitGroup
if len(pids) > 0 {
// we already know PIDs to query
up, err := GetProcUptime()
if err != nil {
return nil, err
}
wg.Add(len(pids))
if ft.TCP && ft.TCP6 {
wg.Add(len(pids))
}
for _, pid := range pids {
if ft.TCP {
go func(pid int64) {
defer wg.Done()
ents, err := getSSEntry(pid, TypeTCP, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}(pid)
}
if ft.TCP6 {
go func(pid int64) {
defer wg.Done()
ents, err := getSSEntry(pid, TypeTCP6, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}(pid)
}
if len(pids) == 0 {
// find PIDs by Program
if pids, err = ListPIDs(); err != nil {
return
}
} else {
// find PIDs by Program
pids, err = ListPIDs()
// already know PIDs to query
ft.ProgramMatchFunc = func(string) bool { return true }
}
var pmu sync.RWMutex
var wg sync.WaitGroup
limitc := make(chan struct{}, maxConcurrentProcFDLimit)
f := func(pid int64, ttype TransportProtocol) {
defer func() {
<-limitc
wg.Done()
}()
limitc <- struct{}{}
stat, err := GetProcStatByPID(pid, up)
if err != nil {
log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
return
}
up, err := GetProcUptime()
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
ents, err := getSSEntry(pid, ttype, ft.LocalPort, ft.RemotePort)
if err != nil {
return nil, err
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}
wg.Add(len(pids))
if ft.TCP && ft.TCP6 {
wg.Add(len(pids))
if ft.TCP && ft.TCP6 {
wg.Add(len(pids))
}
for _, pid := range pids {
if ft.TCP {
go f(pid, TypeTCP)
}
for _, pid := range pids {
if ft.TCP {
go func(pid int64) {
defer wg.Done()
stat, err := GetProcStatByPID(pid, up)
if err != nil {
log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
ents, err := getSSEntry(pid, TypeTCP, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}(pid)
}
if ft.TCP6 {
go func(pid int64) {
defer wg.Done()
stat, err := GetProcStatByPID(pid, up)
if err != nil {
log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
ents, err := getSSEntry(pid, TypeTCP6, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}(pid)
}
if ft.TCP6 {
go f(pid, TypeTCP6)
}
}
wg.Wait()

222
vendor/github.com/gyuho/psn/proc.go generated vendored
View File

@ -1,222 +0,0 @@
package psn
import (
"fmt"
"io/ioutil"
"time"
)
// Proc represents an entry of various system statistics.
type Proc struct {
UnixTS int64
PSEntry PSEntry
DSEntry DSEntry
ReadsCompletedDelta uint64
SectorsReadDelta uint64
WritesCompletedDelta uint64
SectorsWrittenDelta uint64
NSEntry NSEntry
ReceiveBytesDelta string
ReceivePacketsDelta uint64
TransmitBytesDelta string
TransmitPacketsDelta uint64
ReceiveBytesNumDelta uint64
TransmitBytesNumDelta uint64
// Extra exists to support customized data query.
Extra []byte
}
// GetProc returns current 'Proc' data.
// PID is required.
// Disk device, network interface, extra path are optional.
func GetProc(opts ...FilterFunc) (Proc, error) {
ft := &EntryFilter{}
ft.applyOpts(opts)
if ft.PID == 0 {
return Proc{}, fmt.Errorf("unknown PID %d", ft.PID)
}
proc := Proc{UnixTS: time.Now().Unix()}
errc := make(chan error)
go func() {
// get process stats
ets, err := GetPS(WithPID(ft.PID))
if err != nil {
errc <- err
return
}
if len(ets) != 1 {
errc <- fmt.Errorf("len(PID=%d entries) != 1 (got %d)", ft.PID, len(ets))
return
}
proc.PSEntry = ets[0]
errc <- nil
}()
if ft.DiskDevice != "" {
go func() {
// get diskstats
ds, err := GetDS()
if err != nil {
errc <- err
return
}
for _, elem := range ds {
if elem.Device == ft.DiskDevice {
proc.DSEntry = elem
break
}
}
errc <- nil
}()
}
if ft.NetworkInterface != "" {
go func() {
// get network I/O stats
ns, err := GetNS()
if err != nil {
errc <- err
return
}
for _, elem := range ns {
if elem.Interface == ft.NetworkInterface {
proc.NSEntry = elem
break
}
}
errc <- nil
}()
}
if ft.ExtraPath != "" {
go func() {
f, err := openToRead(ft.ExtraPath)
if err != nil {
errc <- err
return
}
b, err := ioutil.ReadAll(f)
if err != nil {
errc <- err
return
}
proc.Extra = b
errc <- nil
}()
}
cnt := 0
for cnt != len(opts) {
err := <-errc
if err != nil {
return Proc{}, err
}
cnt++
}
if ft.DiskDevice != "" {
if proc.DSEntry.Device == "" {
return Proc{}, fmt.Errorf("disk device %q was not found", ft.DiskDevice)
}
}
if ft.NetworkInterface != "" {
if proc.NSEntry.Interface == "" {
return Proc{}, fmt.Errorf("network interface %q was not found", ft.NetworkInterface)
}
}
return proc, nil
}
var (
	// ProcHeader lists all Proc CSV columns.
	// The first column is the row's Unix timestamp in seconds;
	// process-stats columns follow, and init() appends the
	// disk, network, and delta columns.
	ProcHeader = append([]string{"UNIX-TS"}, columnsPSEntry...)
	// ProcHeaderIndex maps each Proc column name to its index in row.
	// It is populated by init() once all columns are appended.
	ProcHeaderIndex = make(map[string]int)
)
// init extends 'ProcHeader' with disk-stats, network-stats, and
// delta columns, then builds the column-name-to-index lookup.
func init() {
	ProcHeader = append(ProcHeader, columnsDSEntry...)
	ProcHeader = append(ProcHeader, columnsNSEntry...)

	deltaColumns := []string{
		"READS-COMPLETED-DELTA",
		"SECTORS-READ-DELTA",
		"WRITES-COMPLETED-DELTA",
		"SECTORS-WRITTEN-DELTA",
		"RECEIVE-BYTES-DELTA",
		"RECEIVE-PACKETS-DELTA",
		"TRANSMIT-BYTES-DELTA",
		"TRANSMIT-PACKETS-DELTA",
		"RECEIVE-BYTES-NUM-DELTA",
		"TRANSMIT-BYTES-NUM-DELTA",
		"EXTRA",
	}
	ProcHeader = append(ProcHeader, deltaColumns...)

	for idx, col := range ProcHeader {
		ProcHeaderIndex[col] = idx
	}
}
// ToRow converts 'Proc' to a string slice aligned with 'ProcHeader'.
// Make sure to change this whenever 'Proc' fields are updated.
func (p *Proc) ToRow() (row []string) {
	row = make([]string, len(ProcHeader))
	row[0] = fmt.Sprintf("%d", p.UnixTS)          // UNIX-TS
	row[1] = p.PSEntry.Program                    // PROGRAM
	row[2] = p.PSEntry.State                      // STATE
	row[3] = fmt.Sprintf("%d", p.PSEntry.PID)     // PID
	row[4] = fmt.Sprintf("%d", p.PSEntry.PPID)    // PPID
	row[5] = p.PSEntry.CPU                        // CPU
	row[6] = p.PSEntry.VMRSS                      // VMRSS
	row[7] = p.PSEntry.VMSize                     // VMSIZE
	row[8] = fmt.Sprintf("%d", p.PSEntry.FD)      // FD
	row[9] = fmt.Sprintf("%d", p.PSEntry.Threads) // THREADS
	// BUG FIX: these two columns previously reused p.PSEntry.Threads;
	// ReadCSV parses them back into the distinct
	// VoluntaryCtxtSwitches/NonvoluntaryCtxtSwitches fields.
	row[10] = fmt.Sprintf("%d", p.PSEntry.VoluntaryCtxtSwitches)    // VOLUNTARY-CTXT-SWITCHES
	row[11] = fmt.Sprintf("%d", p.PSEntry.NonvoluntaryCtxtSwitches) // NON-VOLUNTARY-CTXT-SWITCHES
	row[12] = fmt.Sprintf("%3.2f", p.PSEntry.CPUNum)                // CPU-NUM
	row[13] = fmt.Sprintf("%d", p.PSEntry.VMRSSNum)                 // VMRSS-NUM
	row[14] = fmt.Sprintf("%d", p.PSEntry.VMSizeNum)                // VMSIZE-NUM
	row[15] = p.DSEntry.Device                                      // DEVICE
	row[16] = fmt.Sprintf("%d", p.DSEntry.ReadsCompleted)           // READS-COMPLETED
	row[17] = fmt.Sprintf("%d", p.DSEntry.SectorsRead)              // SECTORS-READ
	row[18] = p.DSEntry.TimeSpentOnReading                          // TIME(READS)
	row[19] = fmt.Sprintf("%d", p.DSEntry.WritesCompleted)          // WRITES-COMPLETED
	row[20] = fmt.Sprintf("%d", p.DSEntry.SectorsWritten)           // SECTORS-WRITTEN
	row[21] = p.DSEntry.TimeSpentOnWriting                          // TIME(WRITES)
	row[22] = fmt.Sprintf("%d", p.DSEntry.TimeSpentOnReadingMs)     // MILLISECONDS(READS)
	row[23] = fmt.Sprintf("%d", p.DSEntry.TimeSpentOnWritingMs)     // MILLISECONDS(WRITES)
	row[24] = p.NSEntry.Interface                                   // INTERFACE
	row[25] = p.NSEntry.ReceiveBytes                                // RECEIVE-BYTES
	row[26] = fmt.Sprintf("%d", p.NSEntry.ReceivePackets)           // RECEIVE-PACKETS
	row[27] = p.NSEntry.TransmitBytes                               // TRANSMIT-BYTES
	row[28] = fmt.Sprintf("%d", p.NSEntry.TransmitPackets)          // TRANSMIT-PACKETS
	row[29] = fmt.Sprintf("%d", p.NSEntry.ReceiveBytesNum)          // RECEIVE-BYTES-NUM
	row[30] = fmt.Sprintf("%d", p.NSEntry.TransmitBytesNum)         // TRANSMIT-BYTES-NUM
	row[31] = fmt.Sprintf("%d", p.ReadsCompletedDelta)              // READS-COMPLETED-DELTA
	row[32] = fmt.Sprintf("%d", p.SectorsReadDelta)                 // SECTORS-READ-DELTA
	row[33] = fmt.Sprintf("%d", p.WritesCompletedDelta)             // WRITES-COMPLETED-DELTA
	row[34] = fmt.Sprintf("%d", p.SectorsWrittenDelta)              // SECTORS-WRITTEN-DELTA
	row[35] = p.ReceiveBytesDelta                                   // RECEIVE-BYTES-DELTA
	row[36] = fmt.Sprintf("%d", p.ReceivePacketsDelta)              // RECEIVE-PACKETS-DELTA
	row[37] = p.TransmitBytesDelta                                  // TRANSMIT-BYTES-DELTA
	row[38] = fmt.Sprintf("%d", p.TransmitPacketsDelta)             // TRANSMIT-PACKETS-DELTA
	row[39] = fmt.Sprintf("%d", p.ReceiveBytesNumDelta)             // RECEIVE-BYTES-NUM-DELTA
	row[40] = fmt.Sprintf("%d", p.TransmitBytesNumDelta)            // TRANSMIT-BYTES-NUM-DELTA
	row[41] = string(p.Extra)                                       // EXTRA
	return
}

View File

@ -1,429 +1,261 @@
package psn
import (
"encoding/csv"
"fmt"
"strconv"
humanize "github.com/dustin/go-humanize"
"io/ioutil"
"time"
)
// CSV represents CSV data (header, rows, etc.).
type CSV struct {
FilePath string
PID int64
DiskDevice string
NetworkInterface string
// Proc represents an entry of various system statistics.
type Proc struct {
// UnixNanosecond is unix nano second when this Proc row gets created.
UnixNanosecond int64
Header []string
HeaderIndex map[string]int
// UnixSecond is the converted Unix seconds from UnixNano.
UnixSecond int64
MinUnixTS int64
MaxUnixTS int64
PSEntry PSEntry
// ExtraPath contains extra information.
ExtraPath string
LoadAvg LoadAvg
// Rows are sorted by unix seconds.
Rows []Proc
DSEntry DSEntry
ReadsCompletedDelta uint64
SectorsReadDelta uint64
WritesCompletedDelta uint64
SectorsWrittenDelta uint64
NSEntry NSEntry
ReceiveBytesDelta string
ReceivePacketsDelta uint64
TransmitBytesDelta string
TransmitPacketsDelta uint64
ReceiveBytesNumDelta uint64
TransmitBytesNumDelta uint64
// Extra exists to support customized data query.
Extra []byte
}
// NewCSV returns a new CSV.
func NewCSV(fpath string, pid int64, diskDevice string, networkInterface string, extraPath string) *CSV {
return &CSV{
FilePath: fpath,
PID: pid,
DiskDevice: diskDevice,
NetworkInterface: networkInterface,
type ProcSlice []Proc
Header: ProcHeader,
HeaderIndex: ProcHeaderIndex,
MinUnixTS: 0,
MaxUnixTS: 0,
ExtraPath: extraPath,
Rows: []Proc{},
// Len implements sort.Interface.
func (p ProcSlice) Len() int { return len(p) }

// Swap implements sort.Interface.
func (p ProcSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

// Less orders rows by capture time: primarily by the nanosecond
// timestamp, falling back to the second-granularity timestamp when
// the nanosecond values are equal.
func (p ProcSlice) Less(i, j int) bool {
	if p[i].UnixNanosecond != p[j].UnixNanosecond {
		return p[i].UnixNanosecond < p[j].UnixNanosecond
	}
	return p[i].UnixSecond < p[j].UnixSecond
}
// Add is to be called periodically to add a row to CSV.
// It only appends to CSV. And it estimates empty rows by unix seconds.
func (c *CSV) Add() error {
cur, err := GetProc(
WithPID(c.PID),
WithDiskDevice(c.DiskDevice),
WithNetworkInterface(c.NetworkInterface),
WithExtraPath(c.ExtraPath),
// GetProc returns current 'Proc' data.
// PID is required.
// Disk device, network interface, extra path are optional.
func GetProc(opts ...FilterFunc) (Proc, error) {
	ft := &EntryFilter{}
	ft.applyOpts(opts)
	if ft.PID == 0 {
		return Proc{}, fmt.Errorf("unknown PID %d", ft.PID)
	}
	ts := time.Now().UnixNano()
	proc := Proc{UnixNanosecond: ts, UnixSecond: ConvertUnixNano(ts)}

	// Count the goroutines that are actually launched rather than
	// assuming 'len(opts)+1': an option that carries an empty value
	// (e.g. WithExtraPath("")) spawns no goroutine, so waiting for
	// len(opts)+1 results would block forever.
	launched := 2 // process stats + load average always run
	if ft.DiskDevice != "" {
		launched++
	}
	if ft.NetworkInterface != "" {
		launched++
	}
	if ft.ExtraPath != "" {
		launched++
	}

	// Buffered so that goroutines finishing after an early error
	// return never block on their send (avoids leaking them).
	errc := make(chan error, launched)

	go func() {
		// get process stats
		ets, err := GetPS(WithPID(ft.PID))
		if err != nil {
			errc <- err
			return
		}
		if len(ets) != 1 {
			errc <- fmt.Errorf("len(PID=%d entries) != 1 (got %d)", ft.PID, len(ets))
			return
		}
		proc.PSEntry = ets[0]
		errc <- nil
	}()
	go func() {
		// get system load average
		lvg, err := GetProcLoadAvg()
		if err != nil {
			errc <- err
			return
		}
		proc.LoadAvg = lvg
		errc <- nil
	}()
	if ft.DiskDevice != "" {
		go func() {
			// get diskstats; keep only the requested device
			ds, err := GetDS()
			if err != nil {
				errc <- err
				return
			}
			for _, elem := range ds {
				if elem.Device == ft.DiskDevice {
					proc.DSEntry = elem
					break
				}
			}
			errc <- nil
		}()
	}
	if ft.NetworkInterface != "" {
		go func() {
			// get network I/O stats; keep only the requested interface
			ns, err := GetNS()
			if err != nil {
				errc <- err
				return
			}
			for _, elem := range ns {
				if elem.Interface == ft.NetworkInterface {
					proc.NSEntry = elem
					break
				}
			}
			errc <- nil
		}()
	}
	if ft.ExtraPath != "" {
		go func() {
			// NOTE(review): f is never closed here; likely an fd leak —
			// confirm openToRead semantics before adding a Close.
			f, err := openToRead(ft.ExtraPath)
			if err != nil {
				errc <- err
				return
			}
			b, err := ioutil.ReadAll(f)
			if err != nil {
				errc <- err
				return
			}
			proc.Extra = b
			errc <- nil
		}()
	}

	// wait for every launched collector; fail fast on the first error
	for i := 0; i < launched; i++ {
		if err := <-errc; err != nil {
			return Proc{}, err
		}
	}

	if ft.DiskDevice != "" && proc.DSEntry.Device == "" {
		return Proc{}, fmt.Errorf("disk device %q was not found", ft.DiskDevice)
	}
	if ft.NetworkInterface != "" && proc.NSEntry.Interface == "" {
		return Proc{}, fmt.Errorf("network interface %q was not found", ft.NetworkInterface)
	}
	return proc, nil
}
var (
	// ProcHeader lists all Proc CSV columns.
	// The first two columns are the row's Unix timestamp in
	// nanoseconds and seconds; process-stats columns follow, and
	// init() appends load-average, disk, network, and delta columns.
	ProcHeader = append([]string{"UNIX-NANOSECOND", "UNIX-SECOND"}, columnsPSEntry...)
	// ProcHeaderIndex maps each Proc column name to its index in row.
	// It is populated by init() once all columns are appended.
	ProcHeaderIndex = make(map[string]int)
)
func init() {
// more columns to 'ProcHeader'
ProcHeader = append(ProcHeader,
"LOAD-AVERAGE-1-MINUTE",
"LOAD-AVERAGE-5-MINUTE",
"LOAD-AVERAGE-15-MINUTE",
)
if err != nil {
return err
ProcHeader = append(ProcHeader, columnsDSEntry...)
ProcHeader = append(ProcHeader, columnsNSEntry...)
ProcHeader = append(ProcHeader,
"READS-COMPLETED-DELTA",
"SECTORS-READ-DELTA",
"WRITES-COMPLETED-DELTA",
"SECTORS-WRITTEN-DELTA",
"RECEIVE-BYTES-DELTA",
"RECEIVE-PACKETS-DELTA",
"TRANSMIT-BYTES-DELTA",
"TRANSMIT-PACKETS-DELTA",
"RECEIVE-BYTES-NUM-DELTA",
"TRANSMIT-BYTES-NUM-DELTA",
"EXTRA",
)
for i, v := range ProcHeader {
ProcHeaderIndex[v] = i
}
// first call; just append and return
if len(c.Rows) == 0 {
c.MinUnixTS = cur.UnixTS
c.MaxUnixTS = cur.UnixTS
c.Rows = []Proc{cur}
return nil
}
// compare with previous row before append
prev := c.Rows[len(c.Rows)-1]
if prev.UnixTS >= cur.UnixTS {
// ignore data with wrong seconds
return nil
}
// 'Add' only appends, so later unix should be max
c.MaxUnixTS = cur.UnixTS
if cur.UnixTS-prev.UnixTS == 1 {
cur.ReadsCompletedDelta = cur.DSEntry.ReadsCompleted - prev.DSEntry.ReadsCompleted
cur.SectorsReadDelta = cur.DSEntry.SectorsRead - prev.DSEntry.SectorsRead
cur.WritesCompletedDelta = cur.DSEntry.WritesCompleted - prev.DSEntry.WritesCompleted
cur.SectorsWrittenDelta = cur.DSEntry.SectorsWritten - prev.DSEntry.SectorsWritten
cur.ReceiveBytesNumDelta = cur.NSEntry.ReceiveBytesNum - prev.NSEntry.ReceiveBytesNum
cur.TransmitBytesNumDelta = cur.NSEntry.TransmitBytesNum - prev.NSEntry.TransmitBytesNum
cur.ReceivePacketsDelta = cur.NSEntry.ReceivePackets - prev.NSEntry.ReceivePackets
cur.TransmitPacketsDelta = cur.NSEntry.TransmitPackets - prev.NSEntry.TransmitPackets
cur.ReceiveBytesDelta = humanize.Bytes(cur.ReceiveBytesNumDelta)
cur.TransmitBytesDelta = humanize.Bytes(cur.TransmitBytesNumDelta)
c.Rows = append(c.Rows, cur)
return nil
}
// there are empty rows between; estimate and fill-in
tsDelta := cur.UnixTS - prev.UnixTS
nexts := make([]Proc, 0, tsDelta+1)
// estimate the previous ones based on 'prev' and 'cur'
mid := prev
// Extra; just use the previous value
mid.Extra = prev.Extra
// PSEntry; just use average since some metrisc might decrease
mid.PSEntry.FD = prev.PSEntry.FD + (cur.PSEntry.FD-prev.PSEntry.FD)/2
mid.PSEntry.Threads = prev.PSEntry.Threads + (cur.PSEntry.Threads-prev.PSEntry.Threads)/2
mid.PSEntry.CPUNum = prev.PSEntry.CPUNum + (cur.PSEntry.CPUNum-prev.PSEntry.CPUNum)/2
mid.PSEntry.VMRSSNum = prev.PSEntry.VMRSSNum + (cur.PSEntry.VMRSSNum-prev.PSEntry.VMRSSNum)/2
mid.PSEntry.VMSizeNum = prev.PSEntry.VMSizeNum + (cur.PSEntry.VMSizeNum-prev.PSEntry.VMSizeNum)/2
mid.PSEntry.CPU = fmt.Sprintf("%3.2f %%", mid.PSEntry.CPUNum)
mid.PSEntry.VMRSS = humanize.Bytes(mid.PSEntry.VMRSSNum)
mid.PSEntry.VMSize = humanize.Bytes(mid.PSEntry.VMSizeNum)
// DSEntry; calculate delta assuming that metrics are cumulative
mid.ReadsCompletedDelta = (cur.DSEntry.ReadsCompleted - prev.DSEntry.ReadsCompleted) / uint64(tsDelta)
mid.SectorsReadDelta = (cur.DSEntry.SectorsRead - prev.DSEntry.SectorsRead) / uint64(tsDelta)
mid.WritesCompletedDelta = (cur.DSEntry.WritesCompleted - prev.DSEntry.WritesCompleted) / uint64(tsDelta)
mid.SectorsWrittenDelta = (cur.DSEntry.SectorsWritten - prev.DSEntry.SectorsWritten) / uint64(tsDelta)
timeSpentOnReadingMsDelta := (cur.DSEntry.TimeSpentOnReadingMs - prev.DSEntry.TimeSpentOnReadingMs) / uint64(tsDelta)
timeSpentOnWritingMsDelta := (cur.DSEntry.TimeSpentOnWritingMs - prev.DSEntry.TimeSpentOnWritingMs) / uint64(tsDelta)
// NSEntry; calculate delta assuming that metrics are cumulative
mid.ReceiveBytesNumDelta = (cur.NSEntry.ReceiveBytesNum - prev.NSEntry.ReceiveBytesNum) / uint64(tsDelta)
mid.ReceiveBytesDelta = humanize.Bytes(mid.ReceiveBytesNumDelta)
mid.ReceivePacketsDelta = (cur.NSEntry.ReceivePackets - prev.NSEntry.ReceivePackets) / uint64(tsDelta)
mid.TransmitBytesNumDelta = (cur.NSEntry.TransmitBytesNum - prev.NSEntry.TransmitBytesNum) / uint64(tsDelta)
mid.TransmitBytesDelta = humanize.Bytes(mid.TransmitBytesNumDelta)
mid.TransmitPacketsDelta = (cur.NSEntry.TransmitPackets - prev.NSEntry.TransmitPackets) / uint64(tsDelta)
for i := int64(1); i < tsDelta; i++ {
ev := mid
ev.UnixTS = prev.UnixTS + i
ev.DSEntry.ReadsCompleted += mid.ReadsCompletedDelta * uint64(i)
ev.DSEntry.SectorsRead += mid.SectorsReadDelta * uint64(i)
ev.DSEntry.WritesCompleted += mid.WritesCompletedDelta * uint64(i)
ev.DSEntry.SectorsWritten += mid.SectorsWrittenDelta * uint64(i)
ev.DSEntry.TimeSpentOnReadingMs += timeSpentOnReadingMsDelta * uint64(i)
ev.DSEntry.TimeSpentOnWritingMs += timeSpentOnWritingMsDelta * uint64(i)
ev.DSEntry.TimeSpentOnReading = humanizeDurationMs(ev.DSEntry.TimeSpentOnReadingMs)
ev.DSEntry.TimeSpentOnWriting = humanizeDurationMs(ev.DSEntry.TimeSpentOnWritingMs)
ev.NSEntry.ReceiveBytesNum += mid.ReceiveBytesNumDelta * uint64(i)
ev.NSEntry.ReceiveBytes = humanize.Bytes(ev.NSEntry.ReceiveBytesNum)
ev.NSEntry.ReceivePackets += mid.ReceivePacketsDelta * uint64(i)
ev.NSEntry.TransmitBytesNum += mid.TransmitBytesNumDelta * uint64(i)
ev.NSEntry.TransmitBytes = humanize.Bytes(ev.NSEntry.TransmitBytesNum)
ev.NSEntry.TransmitPackets += mid.TransmitPacketsDelta * uint64(i)
nexts = append(nexts, ev)
}
// now previous entry is estimated; update 'cur' Delta metrics
realPrev := nexts[len(nexts)-1]
cur.ReadsCompletedDelta = cur.DSEntry.ReadsCompleted - realPrev.DSEntry.ReadsCompleted
cur.SectorsReadDelta = cur.DSEntry.SectorsRead - realPrev.DSEntry.SectorsRead
cur.WritesCompletedDelta = cur.DSEntry.WritesCompleted - realPrev.DSEntry.WritesCompleted
cur.SectorsWrittenDelta = cur.DSEntry.SectorsWritten - realPrev.DSEntry.SectorsWritten
cur.ReceiveBytesNumDelta = cur.NSEntry.ReceiveBytesNum - realPrev.NSEntry.ReceiveBytesNum
cur.TransmitBytesNumDelta = cur.NSEntry.TransmitBytesNum - realPrev.NSEntry.TransmitBytesNum
cur.ReceivePacketsDelta = cur.NSEntry.ReceivePackets - realPrev.NSEntry.ReceivePackets
cur.TransmitPacketsDelta = cur.NSEntry.TransmitPackets - realPrev.NSEntry.TransmitPackets
cur.ReceiveBytesDelta = humanize.Bytes(cur.ReceiveBytesNumDelta)
cur.TransmitBytesDelta = humanize.Bytes(cur.TransmitBytesNumDelta)
c.Rows = append(c.Rows, append(nexts, cur)...)
return nil
}
// Save saves CSV to disk.
func (c *CSV) Save() error {
f, err := openToAppend(c.FilePath)
if err != nil {
return err
}
defer f.Close()
// ToRow converts 'Proc' to string slice.
// Make sure to change this whenever 'Proc' fields are updated.
func (p *Proc) ToRow() (row []string) {
row = make([]string, len(ProcHeader))
row[0] = fmt.Sprintf("%d", p.UnixNanosecond) // UNIX-NANOSECOND
row[1] = fmt.Sprintf("%d", p.UnixSecond) // UNIX-SECOND
wr := csv.NewWriter(f)
if err := wr.Write(c.Header); err != nil {
return err
}
row[2] = p.PSEntry.Program // PROGRAM
row[3] = p.PSEntry.State // STATE
row[4] = fmt.Sprintf("%d", p.PSEntry.PID) // PID
row[5] = fmt.Sprintf("%d", p.PSEntry.PPID) // PPID
row[6] = p.PSEntry.CPU // CPU
row[7] = p.PSEntry.VMRSS // VMRSS
row[8] = p.PSEntry.VMSize // VMSIZE
row[9] = fmt.Sprintf("%d", p.PSEntry.FD) // FD
row[10] = fmt.Sprintf("%d", p.PSEntry.Threads) // THREADS
row[11] = fmt.Sprintf("%d", p.PSEntry.Threads) // VOLUNTARY-CTXT-SWITCHES
row[12] = fmt.Sprintf("%d", p.PSEntry.Threads) // NON-VOLUNTARY-CTXT-SWITCHES
row[13] = fmt.Sprintf("%3.2f", p.PSEntry.CPUNum) // CPU-NUM
row[14] = fmt.Sprintf("%d", p.PSEntry.VMRSSNum) // VMRSS-NUM
row[15] = fmt.Sprintf("%d", p.PSEntry.VMSizeNum) // VMSIZE-NUM
rows := make([][]string, len(c.Rows))
for i, row := range c.Rows {
rows[i] = row.ToRow()
}
if err := wr.WriteAll(rows); err != nil {
return err
}
row[16] = fmt.Sprintf("%3.2f", p.LoadAvg.LoadAvg1Minute) // LOAD-AVERAGE-1-MINUTE
row[17] = fmt.Sprintf("%3.2f", p.LoadAvg.LoadAvg5Minute) // LOAD-AVERAGE-5-MINUTE
row[18] = fmt.Sprintf("%3.2f", p.LoadAvg.LoadAvg15Minute) // LOAD-AVERAGE-15-MINUTE
wr.Flush()
return wr.Error()
}
// ReadCSV reads a CSV file and convert to 'CSV'.
func ReadCSV(fpath string) (*CSV, error) {
f, err := openToRead(fpath)
if err != nil {
return nil, err
}
defer f.Close()
rd := csv.NewReader(f)
// in case that rows have Deltaerent number of fields
rd.FieldsPerRecord = -1
rows, err := rd.ReadAll()
if err != nil {
return nil, err
}
if len(rows) <= 1 {
return nil, fmt.Errorf("expected len(rows)>1, got %d", len(rows))
}
if rows[0][0] != "UNIX-TS" {
return nil, fmt.Errorf("expected header at top, got %+v", rows[0])
}
// remove header
rows = rows[1:len(rows):len(rows)]
min, err := strconv.ParseInt(rows[0][0], 10, 64)
if err != nil {
return nil, err
}
max, err := strconv.ParseInt(rows[len(rows)-1][0], 10, 64)
if err != nil {
return nil, err
}
c := &CSV{
FilePath: fpath,
PID: 0,
DiskDevice: "",
NetworkInterface: "",
Header: ProcHeader,
HeaderIndex: ProcHeaderIndex,
MinUnixTS: min,
MaxUnixTS: max,
Rows: make([]Proc, 0, len(rows)),
}
for _, row := range rows {
ts, err := strconv.ParseInt(row[ProcHeaderIndex["UNIX-TS"]], 10, 64)
if err != nil {
return nil, err
}
pid, err := strconv.ParseInt(row[ProcHeaderIndex["PID"]], 10, 64)
if err != nil {
return nil, err
}
ppid, err := strconv.ParseInt(row[ProcHeaderIndex["PPID"]], 10, 64)
if err != nil {
return nil, err
}
fd, err := strconv.ParseUint(row[ProcHeaderIndex["FD"]], 10, 64)
if err != nil {
return nil, err
}
threads, err := strconv.ParseUint(row[ProcHeaderIndex["THREADS"]], 10, 64)
if err != nil {
return nil, err
}
volCtxNum, err := strconv.ParseUint(row[ProcHeaderIndex["VOLUNTARY-CTXT-SWITCHES"]], 10, 64)
if err != nil {
return nil, err
}
nonVolCtxNum, err := strconv.ParseUint(row[ProcHeaderIndex["NON-VOLUNTARY-CTXT-SWITCHES"]], 10, 64)
if err != nil {
return nil, err
}
cpuNum, err := strconv.ParseFloat(row[ProcHeaderIndex["CPU-NUM"]], 64)
if err != nil {
return nil, err
}
vmRssNum, err := strconv.ParseUint(row[ProcHeaderIndex["VMRSS-NUM"]], 10, 64)
if err != nil {
return nil, err
}
vmSizeNum, err := strconv.ParseUint(row[ProcHeaderIndex["VMSIZE-NUM"]], 10, 64)
if err != nil {
return nil, err
}
readsCompleted, err := strconv.ParseUint(row[ProcHeaderIndex["READS-COMPLETED"]], 10, 64)
if err != nil {
return nil, err
}
sectorsRead, err := strconv.ParseUint(row[ProcHeaderIndex["SECTORS-READ"]], 10, 64)
if err != nil {
return nil, err
}
writesCompleted, err := strconv.ParseUint(row[ProcHeaderIndex["WRITES-COMPLETED"]], 10, 64)
if err != nil {
return nil, err
}
sectorsWritten, err := strconv.ParseUint(row[ProcHeaderIndex["SECTORS-WRITTEN"]], 10, 64)
if err != nil {
return nil, err
}
timeSpentOnReadingMs, err := strconv.ParseUint(row[ProcHeaderIndex["MILLISECONDS(READS)"]], 10, 64)
if err != nil {
return nil, err
}
timeSpentOnWritingMs, err := strconv.ParseUint(row[ProcHeaderIndex["MILLISECONDS(WRITES)"]], 10, 64)
if err != nil {
return nil, err
}
readsCompletedDelta, err := strconv.ParseUint(row[ProcHeaderIndex["READS-COMPLETED-DELTA"]], 10, 64)
if err != nil {
return nil, err
}
sectorsReadDelta, err := strconv.ParseUint(row[ProcHeaderIndex["SECTORS-READ-DELTA"]], 10, 64)
if err != nil {
return nil, err
}
writesCompletedDelta, err := strconv.ParseUint(row[ProcHeaderIndex["WRITES-COMPLETED-DELTA"]], 10, 64)
if err != nil {
return nil, err
}
sectorsWrittenDelta, err := strconv.ParseUint(row[ProcHeaderIndex["SECTORS-WRITTEN-DELTA"]], 10, 64)
if err != nil {
return nil, err
}
receivePackets, err := strconv.ParseUint(row[ProcHeaderIndex["RECEIVE-PACKETS"]], 10, 64)
if err != nil {
return nil, err
}
transmitPackets, err := strconv.ParseUint(row[ProcHeaderIndex["TRANSMIT-PACKETS"]], 10, 64)
if err != nil {
return nil, err
}
receiveBytesNum, err := strconv.ParseUint(row[ProcHeaderIndex["RECEIVE-BYTES-NUM"]], 10, 64)
if err != nil {
return nil, err
}
transmitBytesNum, err := strconv.ParseUint(row[ProcHeaderIndex["TRANSMIT-BYTES-NUM"]], 10, 64)
if err != nil {
return nil, err
}
receivePacketsDelta, err := strconv.ParseUint(row[ProcHeaderIndex["RECEIVE-PACKETS-DELTA"]], 10, 64)
if err != nil {
return nil, err
}
transmitPacketsDelta, err := strconv.ParseUint(row[ProcHeaderIndex["TRANSMIT-PACKETS-DELTA"]], 10, 64)
if err != nil {
return nil, err
}
receiveBytesNumDelta, err := strconv.ParseUint(row[ProcHeaderIndex["RECEIVE-BYTES-NUM-DELTA"]], 10, 64)
if err != nil {
return nil, err
}
transmitBytesNumDelta, err := strconv.ParseUint(row[ProcHeaderIndex["TRANSMIT-BYTES-NUM-DELTA"]], 10, 64)
if err != nil {
return nil, err
}
proc := Proc{
UnixTS: ts,
PSEntry: PSEntry{
Program: row[ProcHeaderIndex["PROGRAM"]],
State: row[ProcHeaderIndex["STATE"]],
PID: pid,
PPID: ppid,
CPU: row[ProcHeaderIndex["CPU"]],
VMRSS: row[ProcHeaderIndex["VMRSS"]],
VMSize: row[ProcHeaderIndex["VMSIZE"]],
FD: fd,
Threads: threads,
VoluntaryCtxtSwitches: volCtxNum,
NonvoluntaryCtxtSwitches: nonVolCtxNum,
CPUNum: cpuNum,
VMRSSNum: vmRssNum,
VMSizeNum: vmSizeNum,
},
DSEntry: DSEntry{
Device: row[ProcHeaderIndex["DEVICE"]],
ReadsCompleted: readsCompleted,
SectorsRead: sectorsRead,
TimeSpentOnReading: row[ProcHeaderIndex["TIME(READS)"]],
WritesCompleted: writesCompleted,
SectorsWritten: sectorsWritten,
TimeSpentOnWriting: row[ProcHeaderIndex["TIME(WRITES)"]],
TimeSpentOnReadingMs: timeSpentOnReadingMs,
TimeSpentOnWritingMs: timeSpentOnWritingMs,
},
ReadsCompletedDelta: readsCompletedDelta,
SectorsReadDelta: sectorsReadDelta,
WritesCompletedDelta: writesCompletedDelta,
SectorsWrittenDelta: sectorsWrittenDelta,
NSEntry: NSEntry{
Interface: row[ProcHeaderIndex["INTERFACE"]],
ReceiveBytes: row[ProcHeaderIndex["RECEIVE-BYTES"]],
ReceivePackets: receivePackets,
TransmitBytes: row[ProcHeaderIndex["TRANSMIT-BYTES"]],
TransmitPackets: transmitPackets,
ReceiveBytesNum: receiveBytesNum,
TransmitBytesNum: transmitBytesNum,
},
ReceiveBytesDelta: row[ProcHeaderIndex["RECEIVE-BYTES-DELTA"]],
ReceivePacketsDelta: receivePacketsDelta,
TransmitBytesDelta: row[ProcHeaderIndex["TRANSMIT-BYTES-DELTA"]],
TransmitPacketsDelta: transmitPacketsDelta,
ReceiveBytesNumDelta: receiveBytesNumDelta,
TransmitBytesNumDelta: transmitBytesNumDelta,
Extra: []byte(row[ProcHeaderIndex["EXTRA"]]),
}
c.PID = proc.PSEntry.PID
c.DiskDevice = proc.DSEntry.Device
c.NetworkInterface = proc.NSEntry.Interface
c.Rows = append(c.Rows, proc)
}
return c, nil
row[19] = p.DSEntry.Device // DEVICE
row[20] = fmt.Sprintf("%d", p.DSEntry.ReadsCompleted) // READS-COMPLETED
row[21] = fmt.Sprintf("%d", p.DSEntry.SectorsRead) // SECTORS-READ
row[22] = p.DSEntry.TimeSpentOnReading // TIME(READS)
row[23] = fmt.Sprintf("%d", p.DSEntry.WritesCompleted) // WRITES-COMPLETED
row[24] = fmt.Sprintf("%d", p.DSEntry.SectorsWritten) // SECTORS-WRITTEN
row[25] = p.DSEntry.TimeSpentOnWriting // TIME(WRITES)
row[26] = fmt.Sprintf("%d", p.DSEntry.TimeSpentOnReadingMs) // MILLISECONDS(READS)
row[27] = fmt.Sprintf("%d", p.DSEntry.TimeSpentOnWritingMs) // MILLISECONDS(WRITES)
row[28] = p.NSEntry.Interface // INTERFACE
row[29] = p.NSEntry.ReceiveBytes // RECEIVE-BYTES
row[30] = fmt.Sprintf("%d", p.NSEntry.ReceivePackets) // RECEIVE-PACKETS
row[31] = p.NSEntry.TransmitBytes // TRANSMIT-BYTES
row[32] = fmt.Sprintf("%d", p.NSEntry.TransmitPackets) // TRANSMIT-PACKETS
row[33] = fmt.Sprintf("%d", p.NSEntry.ReceiveBytesNum) // RECEIVE-BYTES-NUM
row[34] = fmt.Sprintf("%d", p.NSEntry.TransmitBytesNum) // TRANSMIT-BYTES-NUM
row[35] = fmt.Sprintf("%d", p.ReadsCompletedDelta) // READS-COMPLETED-DELTA
row[36] = fmt.Sprintf("%d", p.SectorsReadDelta) // SECTORS-READ-DELTA
row[37] = fmt.Sprintf("%d", p.WritesCompletedDelta) // WRITES-COMPLETED-DELTA
row[38] = fmt.Sprintf("%d", p.SectorsWrittenDelta) // SECTORS-WRITTEN-DELTA
row[39] = p.ReceiveBytesDelta // RECEIVE-BYTES-DELTA
row[40] = fmt.Sprintf("%d", p.ReceivePacketsDelta) // RECEIVE-PACKETS-DELTA
row[41] = p.TransmitBytesDelta // TRANSMIT-BYTES-DELTA
row[42] = fmt.Sprintf("%d", p.TransmitPacketsDelta) // TRANSMIT-PACKETS-DELTA
row[43] = fmt.Sprintf("%d", p.ReceiveBytesNumDelta) // RECEIVE-BYTES-NUM-DELTA
row[44] = fmt.Sprintf("%d", p.TransmitBytesNumDelta) // TRANSMIT-BYTES-NUM-DELTA
row[45] = string(p.Extra) // EXTRA
return
}

386
vendor/github.com/gyuho/psn/proc_csv_add.go generated vendored Normal file
View File

@ -0,0 +1,386 @@
package psn
import (
"encoding/csv"
"fmt"
"strconv"
humanize "github.com/dustin/go-humanize"
)
// CSV represents CSV data (header, rows, etc.).
type CSV struct {
	// FilePath is the path Save appends header and rows to.
	FilePath string
	// PID is the process whose stats Add collects.
	PID int64
	// DiskDevice is the disk device to track (optional).
	DiskDevice string
	// NetworkInterface is the NIC to track (optional).
	NetworkInterface string
	// Header and HeaderIndex mirror ProcHeader/ProcHeaderIndex.
	Header []string
	HeaderIndex map[string]int
	// Min/Max timestamps of the collected rows, in both
	// nanosecond and second granularity; zero until the first Add.
	MinUnixNanosecond int64
	MinUnixSecond int64
	MaxUnixNanosecond int64
	MaxUnixSecond int64
	// ExtraPath contains extra information.
	ExtraPath string
	// Rows are sorted by unix time in nanoseconds.
	// It's the number of nanoseconds (not seconds) elapsed
	// since January 1, 1970 UTC.
	Rows []Proc
}
// NewCSV returns a new CSV collector for the given process,
// disk device, network interface, and extra data path.
func NewCSV(fpath string, pid int64, diskDevice string, networkInterface string, extraPath string) *CSV {
	c := CSV{
		FilePath:         fpath,
		PID:              pid,
		DiskDevice:       diskDevice,
		NetworkInterface: networkInterface,
		ExtraPath:        extraPath,
		Header:           ProcHeader,
		HeaderIndex:      ProcHeaderIndex,
		Rows:             []Proc{},
	}
	// Min/Max timestamps stay at their zero values until the first Add.
	return &c
}
// Add is called periodically to append a new entry to CSV; it only appends.
// If the data is used for time series, make sure to handle missing time stamps between.
// e.g. interpolate by estimating the averages between last row and new row to be inserted.
func (c *CSV) Add() error {
	proc, err := GetProc(
		WithPID(c.PID),
		WithDiskDevice(c.DiskDevice),
		WithNetworkInterface(c.NetworkInterface),
		WithExtraPath(c.ExtraPath),
	)
	if err != nil {
		return err
	}

	// First sample defines both the min and max timestamps.
	if len(c.Rows) == 0 {
		c.MinUnixNanosecond = proc.UnixNanosecond
		c.MinUnixSecond = proc.UnixSecond
		c.MaxUnixNanosecond = proc.UnixNanosecond
		c.MaxUnixSecond = proc.UnixSecond
		c.Rows = []Proc{proc}
		return nil
	}

	// Reject samples whose timestamp does not move forward.
	last := c.Rows[len(c.Rows)-1]
	if last.UnixNanosecond >= proc.UnixNanosecond {
		return fmt.Errorf("clock went backwards: got %v, but expected more than %v", proc.UnixNanosecond, last.UnixNanosecond)
	}

	// 'Add' only appends, so the newest sample carries the max timestamps.
	c.MaxUnixNanosecond = proc.UnixNanosecond
	c.MaxUnixSecond = proc.UnixSecond

	// Deltas against the previous sample.
	proc.ReadsCompletedDelta = proc.DSEntry.ReadsCompleted - last.DSEntry.ReadsCompleted
	proc.SectorsReadDelta = proc.DSEntry.SectorsRead - last.DSEntry.SectorsRead
	proc.WritesCompletedDelta = proc.DSEntry.WritesCompleted - last.DSEntry.WritesCompleted
	proc.SectorsWrittenDelta = proc.DSEntry.SectorsWritten - last.DSEntry.SectorsWritten
	proc.ReceivePacketsDelta = proc.NSEntry.ReceivePackets - last.NSEntry.ReceivePackets
	proc.TransmitPacketsDelta = proc.NSEntry.TransmitPackets - last.NSEntry.TransmitPackets
	proc.ReceiveBytesNumDelta = proc.NSEntry.ReceiveBytesNum - last.NSEntry.ReceiveBytesNum
	proc.ReceiveBytesDelta = humanize.Bytes(proc.ReceiveBytesNumDelta)
	proc.TransmitBytesNumDelta = proc.NSEntry.TransmitBytesNum - last.NSEntry.TransmitBytesNum
	proc.TransmitBytesDelta = humanize.Bytes(proc.TransmitBytesNumDelta)

	c.Rows = append(c.Rows, proc)
	return nil
}
// Save appends the header and all collected rows to c.FilePath.
func (c *CSV) Save() error {
	f, err := openToAppend(c.FilePath)
	if err != nil {
		return err
	}
	defer f.Close()

	w := csv.NewWriter(f)
	if err = w.Write(c.Header); err != nil {
		return err
	}

	records := make([][]string, len(c.Rows))
	for i := range c.Rows {
		records[i] = c.Rows[i].ToRow()
	}
	if err = w.WriteAll(records); err != nil {
		return err
	}

	w.Flush()
	return w.Error()
}
// ReadCSV reads a CSV file and converts it to 'CSV'.
// Make sure to change this whenever 'Proc' fields are updated.
func ReadCSV(fpath string) (*CSV, error) {
	f, err := openToRead(fpath)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	rd := csv.NewReader(f)
	// in case that rows have different number of fields
	rd.FieldsPerRecord = -1
	rows, err := rd.ReadAll()
	if err != nil {
		return nil, err
	}
	if len(rows) <= 1 {
		return nil, fmt.Errorf("expected len(rows)>1, got %d", len(rows))
	}
	if rows[0][0] != "UNIX-NANOSECOND" {
		return nil, fmt.Errorf("expected header at top, got %+v", rows[0])
	}

	// remove header
	rows = rows[1:len(rows):len(rows)]
	min, err := strconv.ParseInt(rows[0][0], 10, 64)
	if err != nil {
		return nil, err
	}
	max, err := strconv.ParseInt(rows[len(rows)-1][0], 10, 64)
	if err != nil {
		return nil, err
	}

	c := &CSV{
		FilePath:          fpath,
		PID:               0,
		DiskDevice:        "",
		NetworkInterface:  "",
		Header:            ProcHeader,
		HeaderIndex:       ProcHeaderIndex,
		MinUnixNanosecond: min,
		MinUnixSecond:     ConvertUnixNano(min),
		MaxUnixNanosecond: max,
		MaxUnixSecond:     ConvertUnixNano(max),
		Rows:              make([]Proc, 0, len(rows)),
	}

	for _, row := range rows {
		// Per-row parsing helpers. Each helper records the first
		// parse error and returns the zero value afterwards, so the
		// row is validated once after all fields have been read.
		var perr error
		col := func(name string) string { return row[ProcHeaderIndex[name]] }
		pi64 := func(name string) int64 {
			if perr != nil {
				return 0
			}
			v, err := strconv.ParseInt(col(name), 10, 64)
			if err != nil {
				perr = err
			}
			return v
		}
		pu64 := func(name string) uint64 {
			if perr != nil {
				return 0
			}
			v, err := strconv.ParseUint(col(name), 10, 64)
			if err != nil {
				perr = err
			}
			return v
		}
		pf64 := func(name string) float64 {
			if perr != nil {
				return 0
			}
			v, err := strconv.ParseFloat(col(name), 64)
			if err != nil {
				perr = err
			}
			return v
		}

		proc := Proc{
			UnixNanosecond: pi64("UNIX-NANOSECOND"),
			UnixSecond:     pi64("UNIX-SECOND"),
			PSEntry: PSEntry{
				Program:                  col("PROGRAM"),
				State:                    col("STATE"),
				PID:                      pi64("PID"),
				PPID:                     pi64("PPID"),
				CPU:                      col("CPU"),
				VMRSS:                    col("VMRSS"),
				VMSize:                   col("VMSIZE"),
				FD:                       pu64("FD"),
				Threads:                  pu64("THREADS"),
				VoluntaryCtxtSwitches:    pu64("VOLUNTARY-CTXT-SWITCHES"),
				NonvoluntaryCtxtSwitches: pu64("NON-VOLUNTARY-CTXT-SWITCHES"),
				CPUNum:                   pf64("CPU-NUM"),
				VMRSSNum:                 pu64("VMRSS-NUM"),
				VMSizeNum:                pu64("VMSIZE-NUM"),
			},
			LoadAvg: LoadAvg{
				LoadAvg1Minute:  pf64("LOAD-AVERAGE-1-MINUTE"),
				LoadAvg5Minute:  pf64("LOAD-AVERAGE-5-MINUTE"),
				LoadAvg15Minute: pf64("LOAD-AVERAGE-15-MINUTE"),
			},
			DSEntry: DSEntry{
				Device:               col("DEVICE"),
				ReadsCompleted:       pu64("READS-COMPLETED"),
				SectorsRead:          pu64("SECTORS-READ"),
				TimeSpentOnReading:   col("TIME(READS)"),
				WritesCompleted:      pu64("WRITES-COMPLETED"),
				SectorsWritten:       pu64("SECTORS-WRITTEN"),
				TimeSpentOnWriting:   col("TIME(WRITES)"),
				TimeSpentOnReadingMs: pu64("MILLISECONDS(READS)"),
				TimeSpentOnWritingMs: pu64("MILLISECONDS(WRITES)"),
			},
			ReadsCompletedDelta:  pu64("READS-COMPLETED-DELTA"),
			SectorsReadDelta:     pu64("SECTORS-READ-DELTA"),
			WritesCompletedDelta: pu64("WRITES-COMPLETED-DELTA"),
			SectorsWrittenDelta:  pu64("SECTORS-WRITTEN-DELTA"),
			NSEntry: NSEntry{
				Interface:        col("INTERFACE"),
				ReceiveBytes:     col("RECEIVE-BYTES"),
				ReceivePackets:   pu64("RECEIVE-PACKETS"),
				TransmitBytes:    col("TRANSMIT-BYTES"),
				TransmitPackets:  pu64("TRANSMIT-PACKETS"),
				ReceiveBytesNum:  pu64("RECEIVE-BYTES-NUM"),
				TransmitBytesNum: pu64("TRANSMIT-BYTES-NUM"),
			},
			ReceiveBytesDelta:     col("RECEIVE-BYTES-DELTA"),
			ReceivePacketsDelta:   pu64("RECEIVE-PACKETS-DELTA"),
			TransmitBytesDelta:    col("TRANSMIT-BYTES-DELTA"),
			TransmitPacketsDelta:  pu64("TRANSMIT-PACKETS-DELTA"),
			ReceiveBytesNumDelta:  pu64("RECEIVE-BYTES-NUM-DELTA"),
			TransmitBytesNumDelta: pu64("TRANSMIT-BYTES-NUM-DELTA"),
			Extra:                 []byte(col("EXTRA")),
		}
		if perr != nil {
			return nil, perr
		}

		// the last row wins for PID/device/interface metadata
		c.PID = proc.PSEntry.PID
		c.DiskDevice = proc.DSEntry.Device
		c.NetworkInterface = proc.NSEntry.Interface
		c.Rows = append(c.Rows, proc)
	}
	return c, nil
}

271
vendor/github.com/gyuho/psn/proc_csv_binary_search.go generated vendored Normal file
View File

@ -0,0 +1,271 @@
package psn
import (
"math"
"sort"
)
// BinarySearchInt64 binary-searches the int64 slice
// and returns the index of the matching element.
// So input slice must be sorted.
// It returns -1 if not found.
// BinarySearchInt64 binary-searches the sorted int64 slice 'nums'
// and returns the index of the element equal to 'v'.
// The input slice must be sorted in ascending order.
// It returns -1 when no element matches.
func BinarySearchInt64(nums []int64, v int64) int {
	left, right := 0, len(nums)-1
	for left <= right {
		// overflow-safe midpoint
		idx := left + (right-left)/2
		switch {
		case nums[idx] < v:
			// a match, if any, lies in the right half
			left = idx + 1
		case nums[idx] > v:
			// a match, if any, lies in the left half
			right = idx - 1
		default:
			return idx
		}
	}
	return -1
}
// Tree defines a binary search tree that can locate the stored
// value closest to a query value.
type Tree interface {
	// Closest returns the index (into the slice the tree was built
	// from) and the value of the stored element nearest to 'v'.
	Closest(v float64) (index int, value float64)
}
// NewBinaryTree builds a new binary search tree from 'nums'.
// The original slice is left unmodified. It returns nil for an
// empty input.
func NewBinaryTree(nums []float64) Tree {
	if len(nums) == 0 {
		return nil
	}
	// first element seeds the root; the rest are inserted in order
	root := newFloat64Node(0, nums[0])
	for idx := 1; idx < len(nums); idx++ {
		insert(root, idx, nums[idx])
	}
	return root
}
// NewBinaryTreeInt64 builds a new binary search tree from int64
// values by converting them to float64 first.
// The original slice is left unmodified.
func NewBinaryTreeInt64(nums []int64) Tree {
	converted := make([]float64, len(nums))
	for idx, n := range nums {
		converted[idx] = float64(n)
	}
	return NewBinaryTree(converted)
}
// Closest implements Tree; it returns the index and value of the
// stored element nearest to 'v'.
// NOTE(review): assumes the receiver is non-nil — searchClosest
// returns nil for a nil root and 'nd.Idx' would then panic.
func (root *float64Node) Closest(v float64) (index int, value float64) {
	nd := searchClosest(root, v)
	return nd.Idx, nd.Value
}
// float64Node is a binary-search-tree node used to find the
// closest float64 value.
type float64Node struct {
	// Idx is the element's index in the slice the tree was built from.
	Idx   int
	Value float64
	Left  *float64Node
	Right *float64Node
}
// newFloat64Node returns a new leaf node holding the value 'v' and
// its original slice index 'idx'.
func newFloat64Node(idx int, v float64) *float64Node {
	nd := float64Node{Idx: idx, Value: v}
	return &nd
}
// insert adds (idx, v) to the tree rooted at 'root' and returns the
// root. Values smaller than a node go left, everything else goes
// right; the tree assumes values are unique.
func insert(root *float64Node, idx int, v float64) *float64Node {
	if root == nil {
		return newFloat64Node(idx, v)
	}
	// walk down iteratively to the insertion point
	cur := root
	for {
		if cur.Value > v {
			if cur.Left == nil {
				cur.Left = newFloat64Node(idx, v)
				return root
			}
			cur = cur.Left
			continue
		}
		if cur.Right == nil {
			cur.Right = newFloat64Node(idx, v)
			return root
		}
		cur = cur.Right
	}
}
// search returns the node holding exactly 'v', or nil when the value
// is not present in the tree.
func search(root *float64Node, v float64) *float64Node {
	for nd := root; nd != nil; {
		switch {
		case nd.Value == v:
			return nd
		case nd.Value > v:
			nd = nd.Left
		default:
			nd = nd.Right
		}
	}
	return nil
}
// searchClosest returns the node whose value is closest to 'v'.
// It only descends the side of the tree 'v' would be inserted on;
// the closest stored value always lies on that root-to-leaf path.
// On a tie, the deeper node (child) wins.
func searchClosest(root *float64Node, v float64) *float64Node {
	if root == nil {
		return nil
	}
	// recurse toward where 'v' would live in the tree
	var child *float64Node
	if root.Value > v {
		child = searchClosest(root.Left, v)
	} else {
		child = searchClosest(root.Right, v)
	}
	// no children on that side; root is the best candidate
	if child == nil {
		return root
	}
	// pick whichever of root/child is nearer to 'v'
	rootDiff := math.Abs(float64(root.Value - v))
	childDiff := math.Abs(float64(child.Value - v))
	if rootDiff < childDiff {
		// diff with root is strictly smaller
		return root
	}
	return child
}
// boundary is the pair of known values enclosing a missing value.
type boundary struct {
	// lower is the closest known value below the missing one;
	// lowerIdx is its index in the original (pre-sort) slice.
	lower    int64
	lowerIdx int
	// upper is the closest known value above the missing one;
	// upperIdx is its index in the original (pre-sort) slice.
	upper    int64
	upperIdx int
}
// boundaries indexes a set of int64 values so the nearest known
// lower/upper neighbors of a missing value can be found.
type boundaries struct {
	// the original (pre-sort) slice is stored as well,
	// so indexes into it can be reported
	numsOrig    []int64
	num2OrigIdx map[int64]int // value -> index in numsOrig
	numsSorted    []int64
	num2SortedIdx map[int64]int // value -> index in numsSorted
	// binary search tree over the values, for closest-value queries
	tr Tree
}
// buildBoundaries indexes 'nums' for nearest-neighbor lookups.
// NOTE(review): it sorts 'nums' in place, so the caller's slice is
// reordered after this call; the pre-sort order survives in numsOrig.
// Duplicate values overwrite each other in the lookup maps — assumes
// values are unique (unix seconds here) — TODO confirm.
func buildBoundaries(nums []int64) *boundaries {
	num2OrigIdx := make(map[int64]int)
	for i := range nums {
		num2OrigIdx[nums[i]] = i
	}
	// snapshot the original order before the in-place sort below
	numsOrig := make([]int64, len(nums))
	copy(numsOrig, nums)
	tr := NewBinaryTreeInt64(nums)
	sort.Sort(int64Slice(nums))
	num2SortedIdx := make(map[int64]int)
	for i := range nums {
		num2SortedIdx[nums[i]] = i
	}
	return &boundaries{
		numsOrig:      numsOrig,
		num2OrigIdx:   num2OrigIdx,
		numsSorted:    nums,
		num2SortedIdx: num2SortedIdx,
		tr:            tr,
	}
}
// add registers a newly-known second and rebuilds the lookup
// structures (tree, sorted slice, index maps) so subsequent
// findBoundary calls can see it.
func (bf *boundaries) add(sec int64) {
	bf.numsOrig = append(bf.numsOrig, sec)
	// fix: the appended element lives at len-1, not len
	bf.num2OrigIdx[sec] = len(bf.numsOrig) - 1
	bf.numsSorted = append(bf.numsSorted, sec)
	// rebuild the tree (insertion order is irrelevant to it),
	// then re-sort and re-index the sorted slice
	bf.tr = NewBinaryTreeInt64(bf.numsSorted)
	sort.Sort(int64Slice(bf.numsSorted))
	num2SortedIdx := make(map[int64]int, len(bf.numsSorted))
	for i := range bf.numsSorted {
		num2SortedIdx[bf.numsSorted[i]] = i
	}
	bf.num2SortedIdx = num2SortedIdx
}
// findBoundary returns the pair of known values that enclose
// 'missingSecond'. If the value is actually present, both lower and
// upper collapse onto it. A side with no known value is reported with
// index -1 (and value 0). Returned indexes point into the original
// (pre-sort) slice.
func (bf *boundaries) findBoundary(missingSecond int64) (bd boundary) {
	// closest known value, via the binary search tree
	idxOrig, vOrig := bf.tr.Closest(float64(missingSecond))
	valOrig := int64(vOrig)
	if valOrig == missingSecond {
		// not missing after all; collapse the boundary onto it
		bd.lower = valOrig
		bd.lowerIdx = idxOrig
		bd.upper = valOrig
		bd.upperIdx = idxOrig
		return
	}
	// switch to the index in the sorted slice for the linear scan below
	idxx := bf.num2SortedIdx[valOrig]
	if missingSecond > valOrig {
		bd.lower = valOrig
		bd.lowerIdx = idxOrig
		// valOrig is the lower bound; scan right in the sorted slice
		// for the first value above missingSecond
		for j := idxx + 1; j < len(bf.numsSorted); j++ {
			if bf.numsSorted[j] > missingSecond {
				// found upper bound; report its pre-sort index
				bd.upper = bf.numsSorted[j]
				bd.upperIdx = bf.num2OrigIdx[bf.numsSorted[j]]
				return
			}
		}
		// no known value above missingSecond
		bd.upper = 0
		bd.upperIdx = -1
		return
	}
	bd.upper = valOrig
	bd.upperIdx = idxOrig
	// valOrig is the upper bound; scan left in the sorted slice
	// for the first value below missingSecond
	for j := idxx - 1; j >= 0; j-- {
		if bf.numsSorted[j] < missingSecond {
			// found lower bound; report its pre-sort index
			bd.lower = bf.numsSorted[j]
			bd.lowerIdx = bf.num2OrigIdx[bf.numsSorted[j]]
			return
		}
	}
	// no known value below missingSecond
	bd.lower = 0
	bd.lowerIdx = -1
	return
}
// int64Slice implements sort.Interface for an ascending []int64.
type int64Slice []int64

func (s int64Slice) Len() int           { return len(s) }
func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }

447
vendor/github.com/gyuho/psn/proc_csv_interpolate.go generated vendored Normal file
View File

@ -0,0 +1,447 @@
package psn
import (
"fmt"
"sort"
humanize "github.com/dustin/go-humanize"
)
// Combine combines a list of Proc and returns one combined Proc.
// Every numeric field is averaged over all inputs; UnixNanosecond is
// reset to 0; UnixSecond and the fields that cannot be averaged keep
// the values of the last element. This is meant to be used to merge
// Proc rows that share the same unix-second timestamp.
func Combine(procs ...Proc) Proc {
	if len(procs) < 1 {
		return Proc{}
	}
	if len(procs) == 1 {
		return procs[0]
	}
	// seed with the last row so non-averaged fields keep its values
	lastProc := procs[len(procs)-1]
	combined := lastProc
	combined.UnixNanosecond = 0
	// sum every field that will be averaged
	var (
		// for PSEntry
		voluntaryCtxtSwitches    uint64
		nonVoluntaryCtxtSwitches uint64
		cpuNum                   float64
		vmRSSNum                 uint64
		vmSizeNum                uint64
		// for LoadAvg
		loadAvg1Minute                   float64
		loadAvg5Minute                   float64
		loadAvg15Minute                  float64
		runnableKernelSchedulingEntities int64
		currentKernelSchedulingEntities  int64
		// for DSEntry
		readsCompleted       uint64
		sectorsRead          uint64
		writesCompleted      uint64
		sectorsWritten       uint64
		timeSpentOnReadingMs uint64
		timeSpentOnWritingMs uint64
		// for DSEntry delta
		readsCompletedDelta  uint64
		sectorsReadDelta     uint64
		writesCompletedDelta uint64
		sectorsWrittenDelta  uint64
		// for NSEntry
		receivePackets   uint64
		transmitPackets  uint64
		receiveBytesNum  uint64
		transmitBytesNum uint64
		// for NSEntry delta
		receivePacketsDelta   uint64
		transmitPacketsDelta  uint64
		receiveBytesNumDelta  uint64
		transmitBytesNumDelta uint64
	)
	for _, p := range procs {
		// for PSEntry
		voluntaryCtxtSwitches += p.PSEntry.VoluntaryCtxtSwitches
		nonVoluntaryCtxtSwitches += p.PSEntry.NonvoluntaryCtxtSwitches
		cpuNum += p.PSEntry.CPUNum
		vmRSSNum += p.PSEntry.VMRSSNum
		vmSizeNum += p.PSEntry.VMSizeNum
		// for LoadAvg
		loadAvg1Minute += p.LoadAvg.LoadAvg1Minute
		loadAvg5Minute += p.LoadAvg.LoadAvg5Minute
		loadAvg15Minute += p.LoadAvg.LoadAvg15Minute
		runnableKernelSchedulingEntities += p.LoadAvg.RunnableKernelSchedulingEntities
		currentKernelSchedulingEntities += p.LoadAvg.CurrentKernelSchedulingEntities
		// for DSEntry
		readsCompleted += p.DSEntry.ReadsCompleted
		sectorsRead += p.DSEntry.SectorsRead
		writesCompleted += p.DSEntry.WritesCompleted
		sectorsWritten += p.DSEntry.SectorsWritten
		timeSpentOnReadingMs += p.DSEntry.TimeSpentOnReadingMs
		timeSpentOnWritingMs += p.DSEntry.TimeSpentOnWritingMs
		// for DSEntry delta
		readsCompletedDelta += p.ReadsCompletedDelta
		sectorsReadDelta += p.SectorsReadDelta
		writesCompletedDelta += p.WritesCompletedDelta
		sectorsWrittenDelta += p.SectorsWrittenDelta
		// for NSEntry
		receivePackets += p.NSEntry.ReceivePackets
		transmitPackets += p.NSEntry.TransmitPackets
		receiveBytesNum += p.NSEntry.ReceiveBytesNum
		transmitBytesNum += p.NSEntry.TransmitBytesNum
		// for NSEntry delta
		receivePacketsDelta += p.ReceivePacketsDelta
		transmitPacketsDelta += p.TransmitPacketsDelta
		receiveBytesNumDelta += p.ReceiveBytesNumDelta
		transmitBytesNumDelta += p.TransmitBytesNumDelta
	}
	pN := len(procs)
	// for PSEntry; humanized strings are re-derived from the averages
	combined.PSEntry.VoluntaryCtxtSwitches = voluntaryCtxtSwitches / uint64(pN)
	combined.PSEntry.NonvoluntaryCtxtSwitches = nonVoluntaryCtxtSwitches / uint64(pN)
	combined.PSEntry.CPUNum = cpuNum / float64(pN)
	combined.PSEntry.CPU = fmt.Sprintf("%3.2f %%", combined.PSEntry.CPUNum)
	combined.PSEntry.VMRSSNum = vmRSSNum / uint64(pN)
	combined.PSEntry.VMRSS = humanize.Bytes(combined.PSEntry.VMRSSNum)
	combined.PSEntry.VMSizeNum = vmSizeNum / uint64(pN)
	combined.PSEntry.VMSize = humanize.Bytes(combined.PSEntry.VMSizeNum)
	// for LoadAvg
	combined.LoadAvg.LoadAvg1Minute = loadAvg1Minute / float64(pN)
	combined.LoadAvg.LoadAvg5Minute = loadAvg5Minute / float64(pN)
	combined.LoadAvg.LoadAvg15Minute = loadAvg15Minute / float64(pN)
	// fix: these two previously averaged 'loadAvg15Minute' by mistake
	// instead of their own accumulators
	combined.LoadAvg.RunnableKernelSchedulingEntities = runnableKernelSchedulingEntities / int64(pN)
	combined.LoadAvg.CurrentKernelSchedulingEntities = currentKernelSchedulingEntities / int64(pN)
	// for DSEntry
	combined.DSEntry.ReadsCompleted = readsCompleted / uint64(pN)
	combined.DSEntry.SectorsRead = sectorsRead / uint64(pN)
	combined.DSEntry.WritesCompleted = writesCompleted / uint64(pN)
	combined.DSEntry.SectorsWritten = sectorsWritten / uint64(pN)
	combined.DSEntry.TimeSpentOnReadingMs = timeSpentOnReadingMs / uint64(pN)
	combined.DSEntry.TimeSpentOnReading = humanizeDurationMs(combined.DSEntry.TimeSpentOnReadingMs)
	combined.DSEntry.TimeSpentOnWritingMs = timeSpentOnWritingMs / uint64(pN)
	combined.DSEntry.TimeSpentOnWriting = humanizeDurationMs(combined.DSEntry.TimeSpentOnWritingMs)
	combined.ReadsCompletedDelta = readsCompletedDelta / uint64(pN)
	combined.SectorsReadDelta = sectorsReadDelta / uint64(pN)
	combined.WritesCompletedDelta = writesCompletedDelta / uint64(pN)
	combined.SectorsWrittenDelta = sectorsWrittenDelta / uint64(pN)
	// for NSEntry
	combined.NSEntry.ReceiveBytesNum = receiveBytesNum / uint64(pN)
	combined.NSEntry.TransmitBytesNum = transmitBytesNum / uint64(pN)
	combined.NSEntry.ReceivePackets = receivePackets / uint64(pN)
	combined.NSEntry.TransmitPackets = transmitPackets / uint64(pN)
	combined.NSEntry.ReceiveBytes = humanize.Bytes(combined.NSEntry.ReceiveBytesNum)
	combined.NSEntry.TransmitBytes = humanize.Bytes(combined.NSEntry.TransmitBytesNum)
	combined.ReceivePacketsDelta = receivePacketsDelta / uint64(pN)
	combined.TransmitPacketsDelta = transmitPacketsDelta / uint64(pN)
	combined.ReceiveBytesNumDelta = receiveBytesNumDelta / uint64(pN)
	combined.ReceiveBytesDelta = humanize.Bytes(combined.ReceiveBytesNumDelta)
	combined.TransmitBytesNumDelta = transmitBytesNumDelta / uint64(pN)
	combined.TransmitBytesDelta = humanize.Bytes(combined.TransmitBytesNumDelta)
	return combined
}
// Interpolate returns the missing, estimated 'Proc's strictly between
// 'lower' and 'upper', assuming 'upper' is later than 'lower' and rows
// are spaced one second apart. Interpolated rows get UnixNanosecond 0;
// fields that cannot be interpolated keep the values of 'upper'.
func Interpolate(lower, upper Proc) (procs []Proc, err error) {
	if upper.UnixSecond <= lower.UnixSecond {
		return nil, fmt.Errorf("lower unix second %d >= upper unix second %d", lower.UnixSecond, upper.UnixSecond)
	}
	// min unix second is 5, max is 7
	// then the expected row number is 7-5+1=3
	expectedRowN := upper.UnixSecond - lower.UnixSecond + 1
	if expectedRowN == 2 {
		// adjacent seconds; nothing to fill in
		return
	}
	// per-step increments from 'lower' toward 'upper'.
	// NOTE(review): unsigned subtraction underflows when a counter in
	// 'upper' is below the one in 'lower' (e.g. counter reset) —
	// TODO confirm inputs are monotonic.
	var (
		// for PSEntry
		voluntaryCtxtSwitches    = (upper.PSEntry.VoluntaryCtxtSwitches - lower.PSEntry.VoluntaryCtxtSwitches) / uint64(expectedRowN)
		nonVoluntaryCtxtSwitches = (upper.PSEntry.NonvoluntaryCtxtSwitches - lower.PSEntry.NonvoluntaryCtxtSwitches) / uint64(expectedRowN)
		cpuNum                   = (upper.PSEntry.CPUNum - lower.PSEntry.CPUNum) / float64(expectedRowN)
		vmRSSNum                 = (upper.PSEntry.VMRSSNum - lower.PSEntry.VMRSSNum) / uint64(expectedRowN)
		vmSizeNum                = (upper.PSEntry.VMSizeNum - lower.PSEntry.VMSizeNum) / uint64(expectedRowN)
		// for LoadAvg
		loadAvg1Minute                   = (upper.LoadAvg.LoadAvg1Minute - lower.LoadAvg.LoadAvg1Minute) / float64(expectedRowN)
		loadAvg5Minute                   = (upper.LoadAvg.LoadAvg5Minute - lower.LoadAvg.LoadAvg5Minute) / float64(expectedRowN)
		loadAvg15Minute                  = (upper.LoadAvg.LoadAvg15Minute - lower.LoadAvg.LoadAvg15Minute) / float64(expectedRowN)
		runnableKernelSchedulingEntities = (upper.LoadAvg.RunnableKernelSchedulingEntities - lower.LoadAvg.RunnableKernelSchedulingEntities) / int64(expectedRowN)
		// fix: previously copy/pasted from the Runnable... fields
		currentKernelSchedulingEntities = (upper.LoadAvg.CurrentKernelSchedulingEntities - lower.LoadAvg.CurrentKernelSchedulingEntities) / int64(expectedRowN)
		// for DSEntry
		readsCompleted       = (upper.DSEntry.ReadsCompleted - lower.DSEntry.ReadsCompleted) / uint64(expectedRowN)
		sectorsRead          = (upper.DSEntry.SectorsRead - lower.DSEntry.SectorsRead) / uint64(expectedRowN)
		writesCompleted      = (upper.DSEntry.WritesCompleted - lower.DSEntry.WritesCompleted) / uint64(expectedRowN)
		sectorsWritten       = (upper.DSEntry.SectorsWritten - lower.DSEntry.SectorsWritten) / uint64(expectedRowN)
		timeSpentOnReadingMs = (upper.DSEntry.TimeSpentOnReadingMs - lower.DSEntry.TimeSpentOnReadingMs) / uint64(expectedRowN)
		timeSpentOnWritingMs = (upper.DSEntry.TimeSpentOnWritingMs - lower.DSEntry.TimeSpentOnWritingMs) / uint64(expectedRowN)
		// for DSEntry delta
		readsCompletedDelta  = (upper.ReadsCompletedDelta - lower.ReadsCompletedDelta) / uint64(expectedRowN)
		sectorsReadDelta     = (upper.SectorsReadDelta - lower.SectorsReadDelta) / uint64(expectedRowN)
		writesCompletedDelta = (upper.WritesCompletedDelta - lower.WritesCompletedDelta) / uint64(expectedRowN)
		sectorsWrittenDelta  = (upper.SectorsWrittenDelta - lower.SectorsWrittenDelta) / uint64(expectedRowN)
		// for NSEntry
		receivePackets   = (upper.NSEntry.ReceivePackets - lower.NSEntry.ReceivePackets) / uint64(expectedRowN)
		transmitPackets  = (upper.NSEntry.TransmitPackets - lower.NSEntry.TransmitPackets) / uint64(expectedRowN)
		receiveBytesNum  = (upper.NSEntry.ReceiveBytesNum - lower.NSEntry.ReceiveBytesNum) / uint64(expectedRowN)
		transmitBytesNum = (upper.NSEntry.TransmitBytesNum - lower.NSEntry.TransmitBytesNum) / uint64(expectedRowN)
		// for NSEntry delta
		receivePacketsDelta   = (upper.ReceivePacketsDelta - lower.ReceivePacketsDelta) / uint64(expectedRowN)
		transmitPacketsDelta  = (upper.TransmitPacketsDelta - lower.TransmitPacketsDelta) / uint64(expectedRowN)
		receiveBytesNumDelta  = (upper.ReceiveBytesNumDelta - lower.ReceiveBytesNumDelta) / uint64(expectedRowN)
		transmitBytesNumDelta = (upper.TransmitBytesNumDelta - lower.TransmitBytesNumDelta) / uint64(expectedRowN)
	)
	// one row per interior second
	procs = make([]Proc, expectedRowN-2)
	for i := range procs {
		// seed with 'upper' so non-interpolated fields carry its values
		procs[i] = upper
		procs[i].UnixNanosecond = 0
		procs[i].UnixSecond = lower.UnixSecond + int64(i+1)
		// for PSEntry
		procs[i].PSEntry.VoluntaryCtxtSwitches = lower.PSEntry.VoluntaryCtxtSwitches + uint64(i+1)*voluntaryCtxtSwitches
		procs[i].PSEntry.NonvoluntaryCtxtSwitches = lower.PSEntry.NonvoluntaryCtxtSwitches + uint64(i+1)*nonVoluntaryCtxtSwitches
		procs[i].PSEntry.CPUNum = lower.PSEntry.CPUNum + float64(i+1)*cpuNum
		procs[i].PSEntry.CPU = fmt.Sprintf("%3.2f %%", procs[i].PSEntry.CPUNum)
		procs[i].PSEntry.VMRSSNum = lower.PSEntry.VMRSSNum + uint64(i+1)*vmRSSNum
		procs[i].PSEntry.VMRSS = humanize.Bytes(procs[i].PSEntry.VMRSSNum)
		procs[i].PSEntry.VMSizeNum = lower.PSEntry.VMSizeNum + uint64(i+1)*vmSizeNum
		procs[i].PSEntry.VMSize = humanize.Bytes(procs[i].PSEntry.VMSizeNum)
		// for LoadAvg
		procs[i].LoadAvg.LoadAvg1Minute = lower.LoadAvg.LoadAvg1Minute + float64(i+1)*loadAvg1Minute
		procs[i].LoadAvg.LoadAvg5Minute = lower.LoadAvg.LoadAvg5Minute + float64(i+1)*loadAvg5Minute
		procs[i].LoadAvg.LoadAvg15Minute = lower.LoadAvg.LoadAvg15Minute + float64(i+1)*loadAvg15Minute
		procs[i].LoadAvg.RunnableKernelSchedulingEntities = lower.LoadAvg.RunnableKernelSchedulingEntities + int64(i+1)*runnableKernelSchedulingEntities
		procs[i].LoadAvg.CurrentKernelSchedulingEntities = lower.LoadAvg.CurrentKernelSchedulingEntities + int64(i+1)*currentKernelSchedulingEntities
		// for DSEntry
		procs[i].DSEntry.ReadsCompleted = lower.DSEntry.ReadsCompleted + uint64(i+1)*readsCompleted
		procs[i].DSEntry.SectorsRead = lower.DSEntry.SectorsRead + uint64(i+1)*sectorsRead
		procs[i].DSEntry.WritesCompleted = lower.DSEntry.WritesCompleted + uint64(i+1)*writesCompleted
		procs[i].DSEntry.SectorsWritten = lower.DSEntry.SectorsWritten + uint64(i+1)*sectorsWritten
		procs[i].DSEntry.TimeSpentOnReadingMs = lower.DSEntry.TimeSpentOnReadingMs + uint64(i+1)*timeSpentOnReadingMs
		procs[i].DSEntry.TimeSpentOnReading = humanizeDurationMs(procs[i].DSEntry.TimeSpentOnReadingMs)
		procs[i].DSEntry.TimeSpentOnWritingMs = lower.DSEntry.TimeSpentOnWritingMs + uint64(i+1)*timeSpentOnWritingMs
		procs[i].DSEntry.TimeSpentOnWriting = humanizeDurationMs(procs[i].DSEntry.TimeSpentOnWritingMs)
		procs[i].ReadsCompletedDelta = lower.ReadsCompletedDelta + uint64(i+1)*readsCompletedDelta
		procs[i].SectorsReadDelta = lower.SectorsReadDelta + uint64(i+1)*sectorsReadDelta
		procs[i].WritesCompletedDelta = lower.WritesCompletedDelta + uint64(i+1)*writesCompletedDelta
		procs[i].SectorsWrittenDelta = lower.SectorsWrittenDelta + uint64(i+1)*sectorsWrittenDelta
		// for NSEntry
		// fix: these previously started from the per-step increment and
		// added multiples of 'voluntaryCtxtSwitches' (copy/paste error);
		// interpolate from 'lower' with each field's own increment
		procs[i].NSEntry.ReceiveBytesNum = lower.NSEntry.ReceiveBytesNum + uint64(i+1)*receiveBytesNum
		procs[i].NSEntry.TransmitBytesNum = lower.NSEntry.TransmitBytesNum + uint64(i+1)*transmitBytesNum
		procs[i].NSEntry.ReceivePackets = lower.NSEntry.ReceivePackets + uint64(i+1)*receivePackets
		procs[i].NSEntry.TransmitPackets = lower.NSEntry.TransmitPackets + uint64(i+1)*transmitPackets
		procs[i].NSEntry.ReceiveBytes = humanize.Bytes(procs[i].NSEntry.ReceiveBytesNum)
		procs[i].NSEntry.TransmitBytes = humanize.Bytes(procs[i].NSEntry.TransmitBytesNum)
		procs[i].ReceivePacketsDelta = lower.ReceivePacketsDelta + uint64(i+1)*receivePacketsDelta
		procs[i].TransmitPacketsDelta = lower.TransmitPacketsDelta + uint64(i+1)*transmitPacketsDelta
		procs[i].ReceiveBytesNumDelta = lower.ReceiveBytesNumDelta + uint64(i+1)*receiveBytesNumDelta
		procs[i].ReceiveBytesDelta = humanize.Bytes(procs[i].ReceiveBytesNumDelta)
		procs[i].TransmitBytesNumDelta = lower.TransmitBytesNumDelta + uint64(i+1)*transmitBytesNumDelta
		procs[i].TransmitBytesDelta = humanize.Bytes(procs[i].TransmitBytesNumDelta)
	}
	return
}
// Interpolate interpolates missing rows in the CSV, assuming rows are
// collected once per second ('missing' means the unix seconds of the
// rows are not continuous). Duplicate-second rows are combined into
// one averaged row, and gaps are filled with estimated rows.
// It returns a new copy of the CSV with all unix nanoseconds reset
// to 0, since data is now aggregated by unix second. It returns
// (nil, nil) when the receiver is nil or has fewer than two rows.
func (c *CSV) Interpolate() (cc *CSV, err error) {
	if c == nil || len(c.Rows) < 2 {
		// no need to interpolate
		return
	}
	// fix: '&(*c)' aliased the receiver, so the "copy" mutated the
	// original CSV in place; take a real (shallow) value copy instead.
	// Rows is replaced wholesale below, never mutated element-wise,
	// so a shallow copy suffices.
	copied := *c
	cc = &copied
	// find missing rows, assuming CSV is to be collected every second
	if cc.MinUnixSecond == cc.MaxUnixSecond {
		// no need to interpolate
		return
	}
	// min unix second is 5, max is 7
	// then the expected row number is 7-5+1=3
	expectedRowN := cc.MaxUnixSecond - cc.MinUnixSecond + 1
	// group rows by their unix second
	secondToAllProcs := make(map[int64][]Proc)
	for _, row := range cc.Rows {
		if _, ok := secondToAllProcs[row.UnixSecond]; ok {
			secondToAllProcs[row.UnixSecond] = append(secondToAllProcs[row.UnixSecond], row)
		} else {
			secondToAllProcs[row.UnixSecond] = []Proc{row}
		}
	}
	if int64(len(cc.Rows)) == expectedRowN && len(cc.Rows) == len(secondToAllProcs) {
		// all rows have distinct unix seconds
		// and they are all continuous unix seconds
		return
	}
	// interpolate cases
	//
	// case #1. If duplicate rows are found (equal/different unix nanoseconds, equal unix seconds),
	// combine those into one row with its average.
	//
	// case #2. If some rows are discontinuous in unix seconds, there are missing rows.
	// Fill in those rows with average estimates.

	// case #1, find duplicate rows!
	// Duplicates are detected by unix second, not by unix nanosecond.
	secondToProc := make(map[int64]Proc)
	for sec, procs := range secondToAllProcs {
		if len(procs) == 0 {
			return nil, fmt.Errorf("empty row found at unix second %d", sec)
		}
		if len(procs) == 1 {
			secondToProc[sec] = procs[0]
			continue // no need to combine
		}
		// procs conflicted on unix second; combine them into one
		secondToProc[sec] = Combine(procs...)
	}
	// sort the deduplicated rows and reset their unix nanoseconds
	rows2 := make([]Proc, 0, len(secondToProc))
	allUnixSeconds := make([]int64, 0, len(secondToProc))
	for _, row := range secondToProc {
		row.UnixNanosecond = 0
		rows2 = append(rows2, row)
		allUnixSeconds = append(allUnixSeconds, row.UnixSecond)
	}
	sort.Sort(ProcSlice(rows2))
	cc.Rows = rows2
	cc.MinUnixNanosecond = rows2[0].UnixNanosecond
	cc.MinUnixSecond = rows2[0].UnixSecond
	cc.MaxUnixNanosecond = rows2[len(rows2)-1].UnixNanosecond
	cc.MaxUnixSecond = rows2[len(rows2)-1].UnixSecond
	// case #2, find missing rows!
	// if unix seconds have discontinued ranges, rows are missing
	missingTS := make(map[int64]struct{})
	for unixSecond := cc.MinUnixSecond; unixSecond <= cc.MaxUnixSecond; unixSecond++ {
		_, ok := secondToProc[unixSecond]
		if !ok {
			missingTS[unixSecond] = struct{}{}
		}
	}
	if len(missingTS) == 0 {
		// all rows have distinct unix seconds
		// and there are no missing unix seconds
		return
	}
	missingSeconds := make([]int64, 0, len(missingTS))
	for ts := range missingTS {
		missingSeconds = append(missingSeconds, ts)
	}
	sort.Sort(int64Slice(missingSeconds))
	// sanity check: no "missing" second should already have a row
	for i := range missingSeconds {
		second := missingSeconds[i]
		if _, ok := secondToProc[second]; ok {
			return nil, fmt.Errorf("second %d is not supposed to be found at secondToProc but found", second)
		}
	}
	// estimate a Proc for each missing second from its closest known
	// lower/upper rows
	bds := buildBoundaries(allUnixSeconds)
	for i := range missingSeconds {
		second := missingSeconds[i]
		if _, ok := secondToProc[second]; ok {
			// already estimated by a previous boundary's interpolation
			continue
		}
		bd := bds.findBoundary(second)
		if bd.lower == second && bd.upper == second {
			return nil, fmt.Errorf("%d is supposed to be missing but found at index %d", second, bd.lowerIdx)
		}
		// both bounds must exist, since boundaries were built from
		// rows spanning the min/max unix seconds
		if bd.lowerIdx == -1 || bd.upperIdx == -1 {
			return nil, fmt.Errorf("boundary is not found for missing second %d", second)
		}
		procLower, ok := secondToProc[bd.lower]
		if !ok {
			return nil, fmt.Errorf("%d is not found at secondToProc", bd.lower)
		}
		procUpper, ok := secondToProc[bd.upper]
		if !ok {
			return nil, fmt.Errorf("%d is not found at secondToProc", bd.upper)
		}
		missingRows, err := Interpolate(procLower, procUpper)
		if err != nil {
			return nil, err
		}
		for _, mrow := range missingRows {
			secondToProc[mrow.UnixSecond] = mrow
			// 'mrow.UnixSecond' is not missing anymore
			bds.add(mrow.UnixSecond)
		}
	}
	// rebuild the rows from the now-complete map
	rows3 := make([]Proc, 0, len(secondToProc))
	for _, row := range secondToProc {
		row.UnixNanosecond = 0
		rows3 = append(rows3, row)
	}
	sort.Sort(ProcSlice(rows3))
	cc.Rows = rows3
	cc.MinUnixNanosecond = rows3[0].UnixNanosecond
	cc.MinUnixSecond = rows3[0].UnixSecond
	cc.MaxUnixNanosecond = rows3[len(rows3)-1].UnixNanosecond
	cc.MaxUnixSecond = rows3[len(rows3)-1].UnixSecond
	return
}
// ConvertUnixNano converts unix nanoseconds to unix seconds,
// truncating the sub-second remainder.
func ConvertUnixNano(unixNano int64) (unixSec int64) {
	const nanosPerSecond int64 = 1e9
	return unixNano / nanosPerSecond
}

93
vendor/github.com/gyuho/psn/proc_loadavg_linux.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
package psn
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
// procLoadAvgColumnIndex is the field position within one
// '/proc/loadavg' line, e.g. '0.37 0.47 0.39 1/839 31397'.
type procLoadAvgColumnIndex int

const (
	proc_loadavg_idx_load_avg_1_minute procLoadAvgColumnIndex = iota
	proc_loadavg_idx_load_avg_5_minute
	proc_loadavg_idx_load_avg_15_minute
	// 'runnable/existing' kernel scheduling entities, e.g. '1/839'
	proc_loadavg_idx_kernel_scheduling_entities_with_slash
	// PID of the most recently created process on the system
	proc_loadavg_idx_pid
)
// GetProcLoadAvg reads and parses '/proc/loadavg'.
// Expected file content looks like '0.37 0.47 0.39 1/839 31397'.
func GetProcLoadAvg() (LoadAvg, error) {
	txt, rerr := readProcLoadAvg()
	if rerr != nil {
		return LoadAvg{}, rerr
	}
	return getProcLoadAvg(txt)
}
// readProcLoadAvg returns the whitespace-trimmed raw contents of
// '/proc/loadavg'.
func readProcLoadAvg() (string, error) {
	f, err := openToRead("/proc/loadavg")
	if err != nil {
		return "", err
	}
	defer f.Close()
	raw, rerr := ioutil.ReadAll(f)
	if rerr != nil {
		return "", rerr
	}
	return strings.TrimSpace(string(raw)), nil
}
// getProcLoadAvg parses one '/proc/loadavg' line
// (e.g. '0.37 0.47 0.39 1/839 31397') into a LoadAvg.
// It returns the first parse error encountered.
func getProcLoadAvg(txt string) (LoadAvg, error) {
	ds := strings.Fields(txt)
	if len(ds) < 5 {
		return LoadAvg{}, fmt.Errorf("not enough columns at %v", ds)
	}
	var (
		lavg LoadAvg
		err  error
	)
	// three load averages: 1, 5 and 15 minutes
	if lavg.LoadAvg1Minute, err = strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_1_minute], 64); err != nil {
		return LoadAvg{}, err
	}
	if lavg.LoadAvg5Minute, err = strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_5_minute], 64); err != nil {
		return LoadAvg{}, err
	}
	if lavg.LoadAvg15Minute, err = strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_15_minute], 64); err != nil {
		return LoadAvg{}, err
	}
	// the fourth field is 'runnable/existing', e.g. '1/839'
	slashed := strings.Split(ds[proc_loadavg_idx_kernel_scheduling_entities_with_slash], "/")
	if len(slashed) != 2 {
		return LoadAvg{}, fmt.Errorf("expected '/' string in kernel scheduling entities field, got %v", slashed)
	}
	if lavg.RunnableKernelSchedulingEntities, err = strconv.ParseInt(slashed[0], 10, 64); err != nil {
		return LoadAvg{}, err
	}
	if lavg.CurrentKernelSchedulingEntities, err = strconv.ParseInt(slashed[1], 10, 64); err != nil {
		return LoadAvg{}, err
	}
	// the fifth field is the most recently created PID
	if lavg.Pid, err = strconv.ParseInt(ds[proc_loadavg_idx_pid], 10, 64); err != nil {
		return LoadAvg{}, err
	}
	return lavg, nil
}

View File

@ -24,7 +24,7 @@ func (tp TransportProtocol) String() string {
case TypeTCP6:
return "tcp6"
default:
panic(fmt.Errorf("unknown %v", tp))
panic(fmt.Errorf("unknown transport protocol %d", tp))
}
}

View File

@ -111,7 +111,7 @@ func parseProcStat(pid int64, up Uptime) (Stat, error) {
hF := s.FieldByName(column + "ParsedStatus")
if hF.IsValid() {
if hF.CanSet() {
hF.SetString(strings.TrimSpace(fv))
hF.SetString(convertProcStatus(fv))
}
}
}
@ -138,43 +138,15 @@ func (s *Stat) update(up Uptime) (Stat, error) {
if strings.HasSuffix(s.Comm, ")") {
s.Comm = s.Comm[:len(s.Comm)-1]
}
cu, err := s.getCPU(up)
if err != nil {
return Stat{}, err
}
s.CpuUsage = cu
return *s, nil
}
// getCPU returns the average CPU usage in percentage.
// http://stackoverflow.com/questions/16726779/how-do-i-get-the-total-cpu-usage-of-an-application-from-proc-pid-stat
func (s Stat) getCPU(up Uptime) (float64, error) {
totalSec := s.Utime + s.Stime
totalSec += s.Cutime + s.Cstime
out, err := exec.Command("/usr/bin/getconf", "CLK_TCK").Output()
if err != nil {
return 0, err
}
ot := strings.TrimSpace(strings.Replace(string(out), "\n", "", -1))
hertz, err := strconv.ParseUint(ot, 10, 64)
if err != nil || hertz == 0 {
return 0, err
}
tookSec := up.UptimeTotal - (float64(s.Starttime) / float64(hertz))
if hertz == 0 || tookSec == 0.0 {
return 0.0, nil
}
return 100 * ((float64(totalSec) / float64(hertz)) / float64(tookSec)), nil
}
const statTmpl = `
----------------------------------------
[/proc/{{.Pid}}/stat]
Name: {{.Comm}}
State: {{.State}}
State: {{.StateParsedStatus}}
Pid: {{.Pid}}
Ppid: {{.Ppid}}
@ -183,7 +155,6 @@ NumThreads: {{.NumThreads}}
Rss: {{.RssParsedBytes}} ({{.RssBytesN}})
Rsslim: {{.RsslimParsedBytes}} ({{.RsslimBytesN}})
Vsize: {{.VsizeParsedBytes}} ({{.VsizeBytesN}})
CpuUsage: {{.CpuUsage}} %
Starttime: {{.Starttime}}
Utime: {{.Utime}}
@ -246,3 +217,28 @@ func (s Stat) String() string {
}
return buf.String()
}
// GetCPUPercentage returns the average CPU usage in percentage.
// http://stackoverflow.com/questions/16726779/how-do-i-get-the-total-cpu-usage-of-an-application-from-proc-pid-stat
// This sometimes differs from the one in 'top' command.
// So do not use it!
func (s Stat) GetCPUPercentage(up Uptime) (float64, error) {
	// total clock ticks the process spent on CPU, including waited-for children
	totalSec := s.Utime + s.Stime
	totalSec += s.Cutime + s.Cstime
	// kernel clock ticks per second (commonly 100)
	out, err := exec.Command("/usr/bin/getconf", "CLK_TCK").Output()
	if err != nil {
		return 0, err
	}
	ot := strings.TrimSpace(strings.Replace(string(out), "\n", "", -1))
	hertz, err := strconv.ParseUint(ot, 10, 64)
	// NOTE(review): when hertz parses to 0 with a nil error, this
	// returns (0, nil); the later 'hertz == 0' check is unreachable
	if err != nil || hertz == 0 {
		return 0, err
	}
	// seconds the process has been running (uptime minus its start offset)
	tookSec := up.UptimeTotal - (float64(s.Starttime) / float64(hertz))
	if hertz == 0 || tookSec == 0.0 {
		return 0.0, nil
	}
	return 100 * ((float64(totalSec) / float64(hertz)) / float64(tookSec)), nil
}

View File

@ -8,6 +8,8 @@ type RawDataType int
const (
TypeBytes RawDataType = iota
TypeInt64
TypeFloat64
TypeTimeMicroseconds
TypeTimeSeconds
TypeIPAddress
@ -87,6 +89,47 @@ var NetTCP = RawData{
},
}
// TopCommandRow represents a row in 'top' command output.
// (See http://man7.org/linux/man-pages/man1/top.1.html).
var TopCommandRow = RawData{
	IsYAML: false,
	// one Column per whitespace-separated field in a 'top' body row,
	// in the order 'top -b' prints them
	Columns: []Column{
		{"PID", "pid of the process", reflect.Int64},
		{"USER", "user name", reflect.String},
		{"PR", "priority", reflect.String},
		{"NI", "nice value of the task", reflect.String},
		{"VIRT", "total amount of virtual memory used by the task (in KiB)", reflect.String},
		{"RES", "non-swapped physical memory a task is using (in KiB)", reflect.String},
		{"SHR", "amount of shared memory available to a task, not all of which is typically resident (in KiB)", reflect.String},
		{"S", "process status", reflect.String},
		{"CPUPercent", "%CPU", reflect.Float64},
		{"MEMPercent", "%MEM", reflect.Float64},
		{"TIME", "CPU time (TIME+)", reflect.String},
		{"COMMAND", "command", reflect.String},
	},
	// columns that need extra post-processing beyond the raw string
	ColumnsToParse: map[string]RawDataType{
		"S":    TypeStatus,
		"VIRT": TypeBytes,
		"RES":  TypeBytes,
		"SHR":  TypeBytes,
	},
}
// LoadAvg represents '/proc/loadavg'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var LoadAvg = RawData{
	IsYAML: false,
	Columns: []Column{
		// fix: the three load-average descriptions previously said
		// "total uptime in seconds", copied from the Uptime schema
		{"load-avg-1-minute", "load average of the last 1 minute", reflect.Float64},
		{"load-avg-5-minute", "load average of the last 5 minutes", reflect.Float64},
		{"load-avg-15-minute", "load average of the last 15 minutes", reflect.Float64},
		{"runnable-kernel-scheduling-entities", "number of currently runnable kernel scheduling entities (processes, threads)", reflect.Int64},
		{"current-kernel-scheduling-entities", "number of kernel scheduling entities that currently exist on the system", reflect.Int64},
		{"pid", "PID of the process that was most recently created on the system", reflect.Int64},
	},
	ColumnsToParse: map[string]RawDataType{},
}
// Uptime represents '/proc/uptime'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var Uptime = RawData{

277
vendor/github.com/gyuho/psn/top.go generated vendored Normal file
View File

@ -0,0 +1,277 @@
package psn
import (
"bytes"
"fmt"
"io"
"os/exec"
"reflect"
"strconv"
"strings"
humanize "github.com/dustin/go-humanize"
)
// GetTop runs the 'top' binary at topPath once and parses every row
// of its output. If pid<1, it reads all processes.
func GetTop(topPath string, pid int64) ([]TopCommandRow, error) {
	out, rerr := ReadTop(topPath, pid)
	if rerr != nil {
		return nil, rerr
	}
	return ParseTopOutput(out)
}
// GetTopDefault returns all entries in 'top' command output, using
// the binary at DefaultTopPath. If pid<1, it reads all processes.
func GetTopDefault(pid int64) ([]TopCommandRow, error) {
	// delegate to GetTop instead of duplicating its read+parse steps
	return GetTop(DefaultTopPath, pid)
}
// DefaultTopPath is the default 'top' command path.
// Override it for systems that install 'top' elsewhere.
var DefaultTopPath = "/usr/bin/top"
// ReadTopDefault reads Linux 'top' command output,
// using the binary at DefaultTopPath.
func ReadTopDefault(pid int64) (string, error) {
	return ReadTop(DefaultTopPath, pid)
}
// ReadTop runs the 'top' binary at topPath once and returns its
// trimmed combined stdout/stderr output.
func ReadTop(topPath string, pid int64) (string, error) {
	var out bytes.Buffer
	err := readTop(topPath, pid, &out)
	return strings.TrimSpace(out.String()), err
}
// readTop runs 'top' once in batch mode ('-b -n 1'), optionally limited
// to a single PID, streaming combined stdout/stderr into w.
func readTop(topPath string, pid int64, w io.Writer) error {
	if !exist(topPath) {
		return fmt.Errorf("%q does not exist", topPath)
	}
	args := []string{"-b", "-n", "1"}
	if pid > 0 {
		args = append(args, "-p", strconv.FormatInt(pid, 10))
	}
	cmd := exec.Command(topPath, args...)
	cmd.Stdout = w
	cmd.Stderr = w
	return cmd.Run()
}
// convertProcStatus expands a process state code from 'top' (e.g. "S",
// "R+") into a human-readable description. Only the first character of
// the trimmed input is significant; trailing modifiers are ignored.
func convertProcStatus(s string) string {
	ns := strings.TrimSpace(s)
	// Fix: the original guarded this slice with len(s) > 1, which panicked
	// on whitespace-only input (ns is empty after trimming). Guard on the
	// trimmed string instead.
	if len(ns) > 1 {
		ns = ns[:1]
	}
	switch ns {
	case "D":
		return "D (uninterruptible sleep)"
	case "R":
		return "R (running)"
	case "S":
		return "S (sleeping)"
	case "T":
		return "T (stopped by job control signal)"
	case "t":
		return "t (stopped by debugger during trace)"
	case "Z":
		return "Z (zombie)"
	default:
		// Echo the original (untrimmed) input for diagnostics.
		return fmt.Sprintf("unknown process %q", s)
	}
}
// parseKiBInTop parses memory-size strings from 'top' output and returns
// the size in bytes (uint64 — the original comment said int64 in error)
// plus a humanized string. A bare number is KiB; an 'm' suffix means
// MiB, a 'g' suffix GiB.
//
// KiB = kibibyte = 1024 bytes
// MiB = mebibyte = 1024 KiB = 1,048,576 bytes
// GiB = gibibyte = 1024 MiB = 1,073,741,824 bytes
// TiB = tebibyte = 1024 GiB = 1,099,511,627,776 bytes
// PiB = pebibyte = 1024 TiB = 1,125,899,906,842,624 bytes
// EiB = exbibyte = 1024 PiB = 1,152,921,504,606,846,976 bytes
//
func parseKiBInTop(s string) (bts uint64, hs string, err error) {
	s = strings.TrimSpace(s)
	switch {
	// suffix 'm' means mebibytes
	case strings.HasSuffix(s, "m"):
		var mib float64
		mib, err = strconv.ParseFloat(s[:len(s)-1], 64)
		if err != nil {
			return 0, "", err
		}
		// Fix: scale before truncating so fractional values such as
		// "1.5m" keep their fraction (the original did uint64(mib)*1024*1024,
		// dropping everything after the decimal point first).
		bts = uint64(mib * 1024 * 1024)
	// suffix 'g' means gibibytes
	case strings.HasSuffix(s, "g"):
		var gib float64
		gib, err = strconv.ParseFloat(s[:len(s)-1], 64)
		if err != nil {
			return 0, "", err
		}
		bts = uint64(gib * 1024 * 1024 * 1024)
	default:
		// No suffix: value is in KiB.
		var kib float64
		kib, err = strconv.ParseFloat(s, 64)
		if err != nil {
			return 0, "", err
		}
		bts = uint64(kib * 1024)
	}
	hs = humanize.Bytes(bts)
	return bts, hs, nil
}
// TopRowHeaders is the headers in 'top' output.
// The parser requires exactly this column order: ParseTopOutput rejects
// any header line that does not DeepEqual this slice.
var TopRowHeaders = []string{
	"PID",
	"USER",
	"PR",
	"NI",
	"VIRT",
	"RES",
	"SHR",
	"S",
	"%CPU",
	"%MEM",
	"TIME+",
	"COMMAND",
}
// topCommandOutputRowIdx indexes the whitespace-separated fields of one
// 'top' output row; the order mirrors TopRowHeaders.
type topCommandOutputRowIdx int

const (
	top_command_output_row_idx_pid topCommandOutputRowIdx = iota
	top_command_output_row_idx_user
	top_command_output_row_idx_pr
	top_command_output_row_idx_ni
	top_command_output_row_idx_virt
	top_command_output_row_idx_res
	top_command_output_row_idx_shr
	top_command_output_row_idx_s
	top_command_output_row_idx_cpu
	top_command_output_row_idx_mem
	top_command_output_row_idx_time
	top_command_output_row_idx_command
)
// ParseTopOutput parses 'top' command output and returns the rows,
// in the same order they appear in the output. Lines before the header
// row are skipped; the header must match TopRowHeaders exactly.
func ParseTopOutput(s string) ([]TopCommandRow, error) {
	lines := strings.Split(s, "\n")
	rows := make([][]string, 0, len(lines))
	headerFound := false
	for _, line := range lines {
		if len(line) == 0 {
			continue
		}
		ds := strings.Fields(strings.TrimSpace(line))
		if ds[0] == "PID" { // header line
			if !reflect.DeepEqual(ds, TopRowHeaders) {
				return nil, fmt.Errorf("unexpected 'top' command header order (%v, expected %v)", ds, TopRowHeaders)
			}
			headerFound = true
			continue
		}
		if !headerFound {
			// Preamble (summary/CPU/memory lines) before the header.
			continue
		}
		// Reuse the fields already split above (the original re-split the
		// same line a second time).
		if len(ds) != len(TopRowHeaders) {
			return nil, fmt.Errorf("unexpected row column number %v (expected %v)", ds, TopRowHeaders)
		}
		rows = append(rows, ds)
	}

	// Fix: the original spawned one goroutine per row and collected results
	// from a channel, so the returned slice order was nondeterministic
	// (whichever goroutine finished first came first). Parse sequentially
	// to preserve the 'top' output order and drop the needless fan-out and
	// single-case select.
	tcRows := make([]TopCommandRow, 0, len(rows))
	for _, row := range rows {
		tr, err := parseTopRow(row)
		if err != nil {
			return nil, err
		}
		tcRows = append(tcRows, tr)
	}
	return tcRows, nil
}
// parseTopRow converts one whitespace-split 'top' row (fields in
// TopRowHeaders order) into a TopCommandRow. It returns a zero
// TopCommandRow and an error as soon as any numeric or size field
// fails to parse.
func parseTopRow(row []string) (TopCommandRow, error) {
	trow := TopCommandRow{
		USER: strings.TrimSpace(row[top_command_output_row_idx_user]),
	}
	// PID: base-10 integer.
	pv, err := strconv.ParseInt(row[top_command_output_row_idx_pid], 10, 64)
	if err != nil {
		return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
	}
	trow.PID = pv
	// Priority and nice values are kept as raw strings.
	trow.PR = strings.TrimSpace(row[top_command_output_row_idx_pr])
	trow.NI = strings.TrimSpace(row[top_command_output_row_idx_ni])
	// VIRT/RES/SHR each keep the raw text plus the parsed byte count and
	// a humanized form (see parseKiBInTop).
	virt, virtTxt, err := parseKiBInTop(row[top_command_output_row_idx_virt])
	if err != nil {
		return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
	}
	trow.VIRT = row[top_command_output_row_idx_virt]
	trow.VIRTBytesN = virt
	trow.VIRTParsedBytes = virtTxt
	res, resTxt, err := parseKiBInTop(row[top_command_output_row_idx_res])
	if err != nil {
		return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
	}
	trow.RES = row[top_command_output_row_idx_res]
	trow.RESBytesN = res
	trow.RESParsedBytes = resTxt
	shr, shrTxt, err := parseKiBInTop(row[top_command_output_row_idx_shr])
	if err != nil {
		return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
	}
	trow.SHR = row[top_command_output_row_idx_shr]
	trow.SHRBytesN = shr
	trow.SHRParsedBytes = shrTxt
	// Process state: raw code plus decoded description.
	trow.S = row[top_command_output_row_idx_s]
	trow.SParsedStatus = convertProcStatus(row[top_command_output_row_idx_s])
	// %CPU and %MEM are floating-point percentages.
	cnum, err := strconv.ParseFloat(row[top_command_output_row_idx_cpu], 64)
	if err != nil {
		return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
	}
	trow.CPUPercent = cnum
	mnum, err := strconv.ParseFloat(row[top_command_output_row_idx_mem], 64)
	if err != nil {
		return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
	}
	trow.MEMPercent = mnum
	// TIME+ is kept as raw text (e.g. cumulative CPU time field).
	trow.TIME = row[top_command_output_row_idx_time]
	return trow, nil
}

19
vendor/github.com/gyuho/psn/util.go generated vendored
View File

@ -78,3 +78,22 @@ func homeDir() string {
}
return os.Getenv("HOME")
}
// exist returns true if the file or directory exists.
//
// Fix: the original called os.Stat twice and dereferenced a nil FileInfo
// (st.IsDir()) whenever Stat failed with an error other than "not exist"
// (e.g. permission denied), which panicked. Any such error is now treated
// as "the path exists", matching the original's intent for every
// non-panicking path.
func exist(fpath string) bool {
	if _, err := os.Stat(fpath); err != nil {
		return !os.IsNotExist(err)
	}
	return true
}

View File

@ -1,127 +0,0 @@
package psn
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"syscall"
)
// errNoDefaultRoute is returned when no default route (zero-length
// destination prefix) can be found in the kernel routing table.
var errNoDefaultRoute = fmt.Errorf("could not find default route")

// getDefaultRoute dumps the kernel routing table via a netlink
// RTM_GETROUTE request and returns the first RTM_NEWROUTE message whose
// destination prefix length is zero, i.e. the default route.
func getDefaultRoute() (*syscall.NetlinkMessage, error) {
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
	if err != nil {
		return nil, err
	}
	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
	if msgErr != nil {
		return nil, msgErr
	}
	rtmsg := syscall.RtMsg{}
	for _, m := range msgs {
		if m.Header.Type != syscall.RTM_NEWROUTE {
			continue
		}
		// Decode the fixed-size RtMsg header that precedes the route
		// attributes; skip messages that fail to decode.
		buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
		if rerr := binary.Read(buf, binary.LittleEndian, &rtmsg); rerr != nil {
			continue
		}
		if rtmsg.Dst_len == 0 {
			// zero-length Dst_len implies default route
			return &m, nil
		}
	}
	return nil, errNoDefaultRoute
}
// getIface dumps interface addresses via a netlink RTM_GETADDR request
// and returns the first RTM_NEWADDR message whose interface index
// matches idx.
// NOTE(review): on no match this returns errNoDefaultRoute rather than
// an interface-specific error — presumably intentional, but worth
// confirming.
func getIface(idx uint32) (*syscall.NetlinkMessage, error) {
	dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, syscall.AF_UNSPEC)
	if err != nil {
		return nil, err
	}
	msgs, msgErr := syscall.ParseNetlinkMessage(dat)
	if msgErr != nil {
		return nil, msgErr
	}
	ifaddrmsg := syscall.IfAddrmsg{}
	for _, m := range msgs {
		if m.Header.Type != syscall.RTM_NEWADDR {
			continue
		}
		// Decode the fixed-size IfAddrmsg header to read the interface index.
		buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
		if rerr := binary.Read(buf, binary.LittleEndian, &ifaddrmsg); rerr != nil {
			continue
		}
		if ifaddrmsg.Index == idx {
			return &m, nil
		}
	}
	return nil, errNoDefaultRoute
}
// errNoDefaultInterface is returned when the default route's output
// interface cannot be resolved to an interface name.
var errNoDefaultInterface = fmt.Errorf("could not find default interface")

// GetDefaultInterface returns the default network interface
// (copied from https://github.com/coreos/etcd/blob/master/pkg/netutil/routes_linux.go).
func GetDefaultInterface() (string, error) {
	// Find the default route, then extract its output interface index (RTA_OIF).
	rmsg, rerr := getDefaultRoute()
	if rerr != nil {
		return "", rerr
	}
	_, oif, err := parsePREFSRC(rmsg)
	if err != nil {
		return "", err
	}
	// Resolve the interface index to its netlink address message, then
	// scan the attributes for the IFLA_IFNAME string.
	ifmsg, ierr := getIface(oif)
	if ierr != nil {
		return "", ierr
	}
	attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
	if aerr != nil {
		return "", aerr
	}
	for _, attr := range attrs {
		if attr.Attr.Type == syscall.IFLA_IFNAME {
			// The attribute value is NUL-terminated; drop the trailing byte.
			return string(attr.Value[:len(attr.Value)-1]), nil
		}
	}
	return "", errNoDefaultInterface
}
// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
// It walks the route attributes of the given netlink message; err is set
// to errNoDefaultRoute when no RTA_OIF attribute is present.
func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
	var attrs []syscall.NetlinkRouteAttr
	attrs, err = syscall.ParseNetlinkRouteAttr(m)
	if err != nil {
		return "", 0, err
	}
	for _, attr := range attrs {
		// RTA_PREFSRC carries the preferred source IP for the route.
		if attr.Attr.Type == syscall.RTA_PREFSRC {
			host = net.IP(attr.Value).String()
		}
		// RTA_OIF carries the output interface index as a little-endian uint32.
		if attr.Attr.Type == syscall.RTA_OIF {
			oif = binary.LittleEndian.Uint32(attr.Value)
		}
		// Stop early once both pieces of information are known.
		if host != "" && oif != uint32(0) {
			break
		}
	}
	// An unset index means the message carried no RTA_OIF attribute.
	if oif == 0 {
		err = errNoDefaultRoute
	}
	return
}

View File

@ -1,6 +1,7 @@
package api
type Node struct {
ID string
Node string
Address string
TaggedAddresses map[string]string
@ -8,6 +9,7 @@ type Node struct {
}
type CatalogService struct {
ID string
Node string
Address string
TaggedAddresses map[string]string
@ -28,6 +30,7 @@ type CatalogNode struct {
}
type CatalogRegistration struct {
ID string
Node string
Address string
TaggedAddresses map[string]string
@ -39,7 +42,7 @@ type CatalogRegistration struct {
type CatalogDeregistration struct {
Node string
Address string
Address string // Obsolete.
Datacenter string
ServiceID string
CheckID string

View File

@ -43,6 +43,11 @@ type ServiceQuery struct {
// this list it must be present. If the tag is preceded with "!" then
// it is disallowed.
Tags []string
// NodeMeta is a map of required node metadata fields. If a key/value
// pair is in this map it must be present on the node in order for the
// service entry to be returned.
NodeMeta map[string]string
}
// QueryTemplate carries the arguments for creating a templated query.

File diff suppressed because it is too large Load Diff

View File

@ -72,80 +72,80 @@ func (m MountStatsNFS) mountStats() {}
// by an NFS client to and from an NFS server.
type NFSBytesStats struct {
// Number of bytes read using the read() syscall.
Read int
Read uint64
// Number of bytes written using the write() syscall.
Write int
Write uint64
// Number of bytes read using the read() syscall in O_DIRECT mode.
DirectRead int
DirectRead uint64
// Number of bytes written using the write() syscall in O_DIRECT mode.
DirectWrite int
DirectWrite uint64
// Number of bytes read from the NFS server, in total.
ReadTotal int
ReadTotal uint64
// Number of bytes written to the NFS server, in total.
WriteTotal int
WriteTotal uint64
// Number of pages read directly via mmap()'d files.
ReadPages int
ReadPages uint64
// Number of pages written directly via mmap()'d files.
WritePages int
WritePages uint64
}
// A NFSEventsStats contains statistics about NFS event occurrences.
type NFSEventsStats struct {
// Number of times cached inode attributes are re-validated from the server.
InodeRevalidate int
InodeRevalidate uint64
// Number of times cached dentry nodes are re-validated from the server.
DnodeRevalidate int
DnodeRevalidate uint64
// Number of times an inode cache is cleared.
DataInvalidate int
DataInvalidate uint64
// Number of times cached inode attributes are invalidated.
AttributeInvalidate int
AttributeInvalidate uint64
// Number of times files or directories have been open()'d.
VFSOpen int
VFSOpen uint64
// Number of times a directory lookup has occurred.
VFSLookup int
VFSLookup uint64
// Number of times permissions have been checked.
VFSAccess int
VFSAccess uint64
// Number of updates (and potential writes) to pages.
VFSUpdatePage int
VFSUpdatePage uint64
// Number of pages read directly via mmap()'d files.
VFSReadPage int
VFSReadPage uint64
// Number of times a group of pages have been read.
VFSReadPages int
VFSReadPages uint64
// Number of pages written directly via mmap()'d files.
VFSWritePage int
VFSWritePage uint64
// Number of times a group of pages have been written.
VFSWritePages int
VFSWritePages uint64
// Number of times directory entries have been read with getdents().
VFSGetdents int
VFSGetdents uint64
// Number of times attributes have been set on inodes.
VFSSetattr int
VFSSetattr uint64
// Number of pending writes that have been forcefully flushed to the server.
VFSFlush int
VFSFlush uint64
// Number of times fsync() has been called on directories and files.
VFSFsync int
VFSFsync uint64
// Number of times locking has been attemped on a file.
VFSLock int
VFSLock uint64
// Number of times files have been closed and released.
VFSFileRelease int
VFSFileRelease uint64
// Unknown. Possibly unused.
CongestionWait int
CongestionWait uint64
// Number of times files have been truncated.
Truncation int
Truncation uint64
// Number of times a file has been grown due to writes beyond its existing end.
WriteExtension int
WriteExtension uint64
// Number of times a file was removed while still open by another process.
SillyRename int
SillyRename uint64
// Number of times the NFS server gave less data than expected while reading.
ShortRead int
ShortRead uint64
// Number of times the NFS server wrote less data than expected while writing.
ShortWrite int
ShortWrite uint64
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
// offline storage.
JukeboxDelay int
JukeboxDelay uint64
// Number of NFS v4.1+ pNFS reads.
PNFSRead int
PNFSRead uint64
// Number of NFS v4.1+ pNFS writes.
PNFSWrite int
PNFSWrite uint64
}
// A NFSOperationStats contains statistics for a single operation.
@ -153,15 +153,15 @@ type NFSOperationStats struct {
// The name of the operation.
Operation string
// Number of requests performed for this operation.
Requests int
Requests uint64
// Number of times an actual RPC request has been transmitted for this operation.
Transmissions int
Transmissions uint64
// Number of times a request has had a major timeout.
MajorTimeouts int
MajorTimeouts uint64
// Number of bytes sent for this operation, including RPC headers and payload.
BytesSent int
BytesSent uint64
// Number of bytes received for this operation, including RPC headers and payload.
BytesReceived int
BytesReceived uint64
// Duration all requests spent queued for transmission before they were sent.
CumulativeQueueTime time.Duration
// Duration it took to get a reply back after the request was transmitted.
@ -174,41 +174,41 @@ type NFSOperationStats struct {
// responses.
type NFSTransportStats struct {
// The local port used for the NFS mount.
Port int
Port uint64
// Number of times the client has had to establish a connection from scratch
// to the NFS server.
Bind int
Bind uint64
// Number of times the client has made a TCP connection to the NFS server.
Connect int
Connect uint64
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
// spent waiting for connections to the server to be established.
ConnectIdleTime int
ConnectIdleTime uint64
// Duration since the NFS mount last saw any RPC traffic.
IdleTime time.Duration
// Number of RPC requests for this mount sent to the NFS server.
Sends int
Sends uint64
// Number of RPC responses for this mount received from the NFS server.
Receives int
Receives uint64
// Number of times the NFS server sent a response with a transaction ID
// unknown to this client.
BadTransactionIDs int
BadTransactionIDs uint64
// A running counter, incremented on each request as the current difference
// ebetween sends and receives.
CumulativeActiveRequests int
CumulativeActiveRequests uint64
// A running counter, incremented on each request by the current backlog
// queue size.
CumulativeBacklog int
CumulativeBacklog uint64
// Stats below only available with stat version 1.1.
// Maximum number of simultaneously active RPC requests ever used.
MaximumRPCSlotsUsed int
MaximumRPCSlotsUsed uint64
// A running counter, incremented on each request as the current size of the
// sending queue.
CumulativeSendingQueue int
CumulativeSendingQueue uint64
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue int
CumulativePendingQueue uint64
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@ -386,9 +386,9 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
}
ns := make([]int, 0, fieldBytesLen)
ns := make([]uint64, 0, fieldBytesLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
@ -415,9 +415,9 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
}
ns := make([]int, 0, fieldEventsLen)
ns := make([]uint64, 0, fieldEventsLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
@ -480,9 +480,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
}
// Skip string operation name for integers
ns := make([]int, 0, numFields-1)
ns := make([]uint64, 0, numFields-1)
for _, st := range ss[1:] {
n, err := strconv.Atoi(st)
n, err := strconv.ParseUint(st, 10, 64)
if err != nil {
return nil, err
}
@ -524,9 +524,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
// in a v1.0 response
ns := make([]int, 0, fieldTransport11Len)
ns := make([]uint64, 0, fieldTransport11Len)
for _, s := range ss {
n, err := strconv.Atoi(s)
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}