vendor: update 'github.com/gyuho/psn'

Gyu-Ho Lee 2017-02-02 09:54:58 -08:00
parent 99535d31f4
commit cb08d56c03
15 changed files with 1829 additions and 689 deletions

glide.lock generated

@ -1,5 +1,5 @@
hash: a742783c5966644247e12ddcda9629965818fdd68e659a0866f279a88d49290f
updated: 2017-01-27T15:23:50.941693612-08:00
hash: c9f541eb51fff541f3c789e790bc21c14a8af0d90b9e019d87664acef2424d80
updated: 2017-02-02T09:54:31.002917679-08:00
imports:
- name: bitbucket.org/zombiezen/gopdf
version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
@ -97,7 +97,7 @@ imports:
- name: github.com/gyuho/dataframe
version: e715fd4225590f6558961c316924e56a8a2647b0
- name: github.com/gyuho/psn
version: 5a0ffadd2ecaca939e373bf01aa45ac66c8fa2c0
version: 2c7fdc8639b23472a462bdc3001cadfa586c1b39
subpackages:
- schema
- name: github.com/hashicorp/consul
@ -107,7 +107,7 @@ imports:
- name: github.com/hashicorp/go-cleanhttp
version: ad28ea4487f05916463e2423a55166280e8254b5
- name: github.com/hashicorp/serf
version: 44157f9dfdbcf6d9e20fcfd52cfee348dbc467ee
version: 34e94dbd8faa991710b442c22ad6ad37c8b44c3b
subpackages:
- coordinate
- name: github.com/inconshreveable/mousetrap
@ -118,7 +118,7 @@ imports:
- draw2dbase
- draw2dimg
- name: github.com/mattn/go-runewidth
version: 737072b4e32b7a5018b4a7125da8d12de90e8045
version: 14207d285c6c197daabb5c9793d63e7af9ab2d50
- name: github.com/matttproud/golang_protobuf_extensions
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
subpackages:
@ -140,7 +140,7 @@ imports:
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60
version: 1878d9fbb537119d24b21ca07effd591627cd160
- name: github.com/samuel/go-zookeeper
version: 1d7be4effb13d2d908342d349d71a284a7542693
subpackages:
@ -154,7 +154,7 @@ imports:
subpackages:
- codec
- name: golang.org/x/image
version: 83686c547965220f8b5d75e83ddc67d73420a89f
version: 69afd001f792d732a78bd7225793315a8deb09ea
subpackages:
- draw
- font

glide.yaml

@ -54,7 +54,7 @@ import:
- package: github.com/gyuho/dataframe
version: e715fd4225590f6558961c316924e56a8a2647b0
- package: github.com/gyuho/psn
version: 5a0ffadd2ecaca939e373bf01aa45ac66c8fa2c0
version: 2c7fdc8639b23472a462bdc3001cadfa586c1b39
- package: github.com/hashicorp/consul
version: 8d57727ff0d113a97c89a992e4681680f95cbf03
subpackages:


@ -19,6 +19,9 @@ type EntryFilter struct {
LocalPort int64
RemotePort int64
// for ps
TopCommandPath string
// for Proc
DiskDevice string
NetworkInterface string
@ -75,6 +78,11 @@ func WithTCP6() FilterFunc {
return func(ft *EntryFilter) { ft.TCP6 = true }
}
// WithTopCommandPath configures 'top' command path.
func WithTopCommandPath(path string) FilterFunc {
return func(ft *EntryFilter) { ft.TopCommandPath = path }
}
// WithDiskDevice to filter entries by disk device.
func WithDiskDevice(name string) FilterFunc {
return func(ft *EntryFilter) { ft.DiskDevice = name }
@ -111,4 +119,8 @@ func (ft *EntryFilter) applyOpts(opts []FilterFunc) {
if ft.LocalPort > 0 && ft.RemotePort > 0 {
panic(fmt.Errorf("can't query by both local(%d) and remote(%d) ports", ft.LocalPort, ft.RemotePort))
}
if ft.TopCommandPath == "" {
ft.TopCommandPath = DefaultTopPath
}
}
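A minimal usage sketch of the new option, assuming the exported API shown in this diff (GetPS, WithTopCommandPath, and the WithPID option referenced in csv.go); PID 1 and the 'top' path are illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/gyuho/psn"
)

func main() {
    // Query PID 1, pointing psn at an explicit 'top' binary; when the
    // option is omitted, applyOpts falls back to DefaultTopPath.
    pss, err := psn.GetPS(
        psn.WithPID(1),
        psn.WithTopCommandPath("/usr/bin/top"),
    )
    if err != nil {
        log.Fatal(err)
    }
    for _, entry := range pss {
        fmt.Printf("%s pid=%d cpu=%s\n", entry.Program, entry.PID, entry.CPU)
    }
}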


@ -1,6 +1,6 @@
package psn
// updated at 2017-01-08 13:43:03.746568457 -0800 PST
// updated at 2017-01-31 14:04:04.136777234 -0800 PST
// NetDev is '/proc/net/dev' in Linux.
// The dev pseudo-file contains network device status information.
@ -78,6 +78,57 @@ type NetTCP struct {
Inode string `column:"inode"`
}
// TopCommandRow represents a row in 'top' command output.
type TopCommandRow struct {
// PID is pid of the process.
PID int64 `column:"pid"`
// USER is user name.
USER string `column:"user"`
// PR is priority.
PR string `column:"pr"`
// NI is nice value of the task.
NI string `column:"ni"`
// VIRT is total amount of virtual memory used by the task (in KiB).
VIRT string `column:"virt"`
VIRTBytesN uint64 `column:"virt_bytes_n"`
VIRTParsedBytes string `column:"virt_parsed_bytes"`
// RES is non-swapped physical memory a task is using (in KiB).
RES string `column:"res"`
RESBytesN uint64 `column:"res_bytes_n"`
RESParsedBytes string `column:"res_parsed_bytes"`
// SHR is amount of shared memory available to a task, not all of which is typically resident (in KiB).
SHR string `column:"shr"`
SHRBytesN uint64 `column:"shr_bytes_n"`
SHRParsedBytes string `column:"shr_parsed_bytes"`
// S is process status.
S string `column:"s"`
SParsedStatus string `column:"s_parsed_status"`
// CPUPercent is %CPU.
CPUPercent float64 `column:"cpupercent"`
// MEMPercent is %MEM.
MEMPercent float64 `column:"mempercent"`
// TIME is CPU time (TIME+).
TIME string `column:"time"`
// COMMAND is command.
COMMAND string `column:"command"`
}
// LoadAvg is '/proc/loadavg' in Linux.
type LoadAvg struct {
// LoadAvg1Minute is the load average over the last 1 minute.
LoadAvg1Minute float64 `column:"load_avg_1_minute"`
// LoadAvg5Minute is the load average over the last 5 minutes.
LoadAvg5Minute float64 `column:"load_avg_5_minute"`
// LoadAvg15Minute is the load average over the last 15 minutes.
LoadAvg15Minute float64 `column:"load_avg_15_minute"`
// RunnableKernelSchedulingEntities is number of currently runnable kernel scheduling entities (processes, threads).
RunnableKernelSchedulingEntities int64 `column:"runnable_kernel_scheduling_entities"`
// CurrentKernelSchedulingEntities is number of kernel scheduling entities that currently exist on the system.
CurrentKernelSchedulingEntities int64 `column:"current_kernel_scheduling_entities"`
// Pid is PID of the process that was most recently created on the system.
Pid int64 `column:"pid"`
}
// Uptime is '/proc/uptime' in Linux.
type Uptime struct {
// UptimeTotal is total uptime in seconds.
@ -265,7 +316,6 @@ type Stat struct {
EnvEnd uint64 `column:"env_end"`
// ExitCode is thread's exit status in the form reported by waitpid(2).
ExitCode int64 `column:"exit_code"`
CpuUsage float64 `column:"cpu_usage"`
}
// Status is '/proc/$PID/status' in Linux.


@ -34,7 +34,7 @@ type PSEntry struct {
VMSizeNum uint64
}
const maxConcurrentProcStat = 32
const maxConcurrentProcFDLimit = 32
// GetPS finds all PSEntry by given filter.
func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
@ -61,13 +61,9 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
// applyOpts already panic when ft.ProgramMatchFunc != nil && ft.PID > 0
}
up, err := GetProcUptime()
if err != nil {
return nil, err
}
// can't filter both by program and by PID
if len(pids) == 0 {
// find PIDs by Program
// list all PIDs, to be matched by Program later
if pids, err = ListPIDs(); err != nil {
return
}
@ -75,10 +71,33 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
ft.ProgramMatchFunc = func(string) bool { return true }
}
var topRows []TopCommandRow
if len(pids) == 1 {
topRows, err = GetTop(ft.TopCommandPath, pids[0])
if err != nil {
return
}
} else {
topRows, err = GetTop(ft.TopCommandPath, 0)
if err != nil {
return
}
}
topM := make(map[int64]TopCommandRow, len(topRows))
for _, row := range topRows {
topM[row.PID] = row
}
for _, pid := range pids {
if _, ok := topM[pid]; !ok {
topM[pid] = TopCommandRow{PID: pid}
log.Printf("PID %d is not found at 'top' command output", pid)
}
}
var pmu sync.RWMutex
var wg sync.WaitGroup
wg.Add(len(pids))
limitc := make(chan struct{}, maxConcurrentProcStat)
limitc := make(chan struct{}, maxConcurrentProcFDLimit)
for _, pid := range pids {
go func(pid int64) {
defer func() {
@ -88,12 +107,8 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
limitc <- struct{}{}
stat, err := GetProcStatByPID(pid, up)
if err != nil {
log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
topRow := topM[pid]
if !ft.ProgramMatchFunc(topRow.COMMAND) {
return
}
@ -104,7 +119,7 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
return
}
ent, err := getPSEntry(pid, stat)
ent, err := getPSEntry(pid, topRow)
if err != nil {
log.Printf("getPSEntry error %v for PID %d", err, pid)
return
@ -123,20 +138,20 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
return
}
func getPSEntry(pid int64, stat Stat) (PSEntry, error) {
func getPSEntry(pid int64, topRow TopCommandRow) (PSEntry, error) {
status, err := GetProcStatusByPID(pid)
if err != nil {
return PSEntry{}, err
}
entry := PSEntry{
Program: stat.Comm,
State: stat.StateParsedStatus,
Program: status.Name,
State: status.StateParsedStatus,
PID: stat.Pid,
PPID: stat.Ppid,
PID: status.Pid,
PPID: status.PPid,
CPU: fmt.Sprintf("%3.2f %%", stat.CpuUsage),
CPU: fmt.Sprintf("%3.2f %%", topRow.CPUPercent),
VMRSS: status.VmRSSParsedBytes,
VMSize: status.VmSizeParsedBytes,
@ -146,7 +161,7 @@ func getPSEntry(pid int64, stat Stat) (PSEntry, error) {
VoluntaryCtxtSwitches: status.VoluntaryCtxtSwitches,
NonvoluntaryCtxtSwitches: status.NonvoluntaryCtxtSwitches,
CPUNum: stat.CpuUsage,
CPUNum: topRow.CPUPercent,
VMRSSNum: status.VmRSSBytesN,
VMSizeNum: status.VmSizeBytesN,
}


@ -54,139 +54,69 @@ func GetSS(opts ...FilterFunc) (sss []SSEntry, err error) {
// applyOpts already panic when ft.ProgramMatchFunc != nil && ft.PID > 0
}
var pmu sync.RWMutex
var wg sync.WaitGroup
if len(pids) > 0 {
// we already know PIDs to query
wg.Add(len(pids))
if ft.TCP && ft.TCP6 {
wg.Add(len(pids))
}
for _, pid := range pids {
if ft.TCP {
go func(pid int64) {
defer wg.Done()
ents, err := getSSEntry(pid, TypeTCP, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}(pid)
}
if ft.TCP6 {
go func(pid int64) {
defer wg.Done()
ents, err := getSSEntry(pid, TypeTCP6, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}(pid)
}
}
} else {
// find PIDs by Program
pids, err = ListPIDs()
if err != nil {
return
}
up, err := GetProcUptime()
if err != nil {
return nil, err
}
if len(pids) == 0 {
// find PIDs by Program
if pids, err = ListPIDs(); err != nil {
return
}
} else {
// already know PIDs to query
ft.ProgramMatchFunc = func(string) bool { return true }
}
var pmu sync.RWMutex
var wg sync.WaitGroup
limitc := make(chan struct{}, maxConcurrentProcFDLimit)
f := func(pid int64, ttype TransportProtocol) {
defer func() {
<-limitc
wg.Done()
}()
limitc <- struct{}{}
stat, err := GetProcStatByPID(pid, up)
if err != nil {
log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
ents, err := getSSEntry(pid, ttype, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}
wg.Add(len(pids))
if ft.TCP && ft.TCP6 {
wg.Add(len(pids))
}
for _, pid := range pids {
if ft.TCP {
go func(pid int64) {
defer wg.Done()
stat, err := GetProcStatByPID(pid, up)
if err != nil {
log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
ents, err := getSSEntry(pid, TypeTCP, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}(pid)
go f(pid, TypeTCP)
}
if ft.TCP6 {
go func(pid int64) {
defer wg.Done()
stat, err := GetProcStatByPID(pid, up)
if err != nil {
log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
return
}
pmu.RLock()
done := ft.TopLimit > 0 && len(sss) >= ft.TopLimit
pmu.RUnlock()
if done {
return
}
ents, err := getSSEntry(pid, TypeTCP6, ft.LocalPort, ft.RemotePort)
if err != nil {
log.Printf("getSSEntry error %v for PID %d", err, pid)
return
}
pmu.Lock()
sss = append(sss, ents...)
pmu.Unlock()
}(pid)
}
go f(pid, TypeTCP6)
}
}
wg.Wait()
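For comparison, a sketch of calling the deduplicated GetSS. WithTCP6 appears earlier in this diff; WithLocalPort is an assumed option mirroring the EntryFilter.LocalPort field:

package main

import (
    "fmt"
    "log"

    "github.com/gyuho/psn"
)

func main() {
    sss, err := psn.GetSS(
        psn.WithTCP6(),          // query tcp6 sockets
        psn.WithLocalPort(2379), // assumed FilterFunc for EntryFilter.LocalPort
    )
    if err != nil {
        log.Fatal(err)
    }
    for _, entry := range sss {
        fmt.Printf("%+v\n", entry)
    }
}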

vendor/github.com/gyuho/psn/proc.go generated vendored

@ -11,6 +11,7 @@ type Proc struct {
UnixTS int64
PSEntry PSEntry
LoadAvg LoadAvg
DSEntry DSEntry
ReadsCompletedDelta uint64
@ -40,7 +41,7 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
if ft.PID == 0 {
return Proc{}, fmt.Errorf("unknown PID %d", ft.PID)
}
proc := Proc{UnixTS: time.Now().Unix()}
proc := Proc{UnixTS: time.Now().UnixNano()}
errc := make(chan error)
go func() {
@ -58,6 +59,16 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
errc <- nil
}()
go func() {
lvg, err := GetProcLoadAvg()
if err != nil {
errc <- err
return
}
proc.LoadAvg = lvg
errc <- nil
}()
if ft.DiskDevice != "" {
go func() {
// get diskstats
@ -112,7 +123,7 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
}
cnt := 0
for cnt != len(opts) {
for cnt != len(opts)+1 { // include load avg query
err := <-errc
if err != nil {
return Proc{}, err
@ -143,6 +154,11 @@ var (
func init() {
// more columns to 'ProcHeader'
ProcHeader = append(ProcHeader,
"LOAD-AVERAGE-1-MINUTE",
"LOAD-AVERAGE-5-MINUTE",
"LOAD-AVERAGE-15-MINUTE",
)
ProcHeader = append(ProcHeader, columnsDSEntry...)
ProcHeader = append(ProcHeader, columnsNSEntry...)
ProcHeader = append(ProcHeader,
@ -167,6 +183,7 @@ func init() {
}
// ToRow converts 'Proc' to string slice.
// Make sure to change this whenever 'Proc' fields are updated.
func (p *Proc) ToRow() (row []string) {
row = make([]string, len(ProcHeader))
row[0] = fmt.Sprintf("%d", p.UnixTS) // UNIX-TS
@ -186,37 +203,41 @@ func (p *Proc) ToRow() (row []string) {
row[13] = fmt.Sprintf("%d", p.PSEntry.VMRSSNum) // VMRSS-NUM
row[14] = fmt.Sprintf("%d", p.PSEntry.VMSizeNum) // VMSIZE-NUM
row[15] = p.DSEntry.Device // DEVICE
row[16] = fmt.Sprintf("%d", p.DSEntry.ReadsCompleted) // READS-COMPLETED
row[17] = fmt.Sprintf("%d", p.DSEntry.SectorsRead) // SECTORS-READ
row[18] = p.DSEntry.TimeSpentOnReading // TIME(READS)
row[19] = fmt.Sprintf("%d", p.DSEntry.WritesCompleted) // WRITES-COMPLETED
row[20] = fmt.Sprintf("%d", p.DSEntry.SectorsWritten) // SECTORS-WRITTEN
row[21] = p.DSEntry.TimeSpentOnWriting // TIME(WRITES)
row[22] = fmt.Sprintf("%d", p.DSEntry.TimeSpentOnReadingMs) // MILLISECONDS(READS)
row[23] = fmt.Sprintf("%d", p.DSEntry.TimeSpentOnWritingMs) // MILLISECONDS(WRITES)
row[15] = fmt.Sprintf("%3.2f", p.LoadAvg.LoadAvg1Minute) // LOAD-AVERAGE-1-MINUTE
row[16] = fmt.Sprintf("%3.2f", p.LoadAvg.LoadAvg5Minute) // LOAD-AVERAGE-5-MINUTE
row[17] = fmt.Sprintf("%3.2f", p.LoadAvg.LoadAvg15Minute) // LOAD-AVERAGE-15-MINUTE
row[24] = p.NSEntry.Interface // INTERFACE
row[25] = p.NSEntry.ReceiveBytes // RECEIVE-BYTES
row[26] = fmt.Sprintf("%d", p.NSEntry.ReceivePackets) // RECEIVE-PACKETS
row[27] = p.NSEntry.TransmitBytes // TRANSMIT-BYTES
row[28] = fmt.Sprintf("%d", p.NSEntry.TransmitPackets) // TRANSMIT-PACKETS
row[29] = fmt.Sprintf("%d", p.NSEntry.ReceiveBytesNum) // RECEIVE-BYTES-NUM
row[30] = fmt.Sprintf("%d", p.NSEntry.TransmitBytesNum) // TRANSMIT-BYTES-NUM
row[18] = p.DSEntry.Device // DEVICE
row[19] = fmt.Sprintf("%d", p.DSEntry.ReadsCompleted) // READS-COMPLETED
row[20] = fmt.Sprintf("%d", p.DSEntry.SectorsRead) // SECTORS-READ
row[21] = p.DSEntry.TimeSpentOnReading // TIME(READS)
row[22] = fmt.Sprintf("%d", p.DSEntry.WritesCompleted) // WRITES-COMPLETED
row[23] = fmt.Sprintf("%d", p.DSEntry.SectorsWritten) // SECTORS-WRITTEN
row[24] = p.DSEntry.TimeSpentOnWriting // TIME(WRITES)
row[25] = fmt.Sprintf("%d", p.DSEntry.TimeSpentOnReadingMs) // MILLISECONDS(READS)
row[26] = fmt.Sprintf("%d", p.DSEntry.TimeSpentOnWritingMs) // MILLISECONDS(WRITES)
row[31] = fmt.Sprintf("%d", p.ReadsCompletedDelta) // READS-COMPLETED-DELTA
row[32] = fmt.Sprintf("%d", p.SectorsReadDelta) // SECTORS-READ-DELTA
row[33] = fmt.Sprintf("%d", p.WritesCompletedDelta) // WRITES-COMPLETED-DELTA
row[34] = fmt.Sprintf("%d", p.SectorsWrittenDelta) // SECTORS-WRITTEN-DELTA
row[27] = p.NSEntry.Interface // INTERFACE
row[28] = p.NSEntry.ReceiveBytes // RECEIVE-BYTES
row[29] = fmt.Sprintf("%d", p.NSEntry.ReceivePackets) // RECEIVE-PACKETS
row[30] = p.NSEntry.TransmitBytes // TRANSMIT-BYTES
row[31] = fmt.Sprintf("%d", p.NSEntry.TransmitPackets) // TRANSMIT-PACKETS
row[32] = fmt.Sprintf("%d", p.NSEntry.ReceiveBytesNum) // RECEIVE-BYTES-NUM
row[33] = fmt.Sprintf("%d", p.NSEntry.TransmitBytesNum) // TRANSMIT-BYTES-NUM
row[35] = p.ReceiveBytesDelta // RECEIVE-BYTES-DELTA
row[36] = fmt.Sprintf("%d", p.ReceivePacketsDelta) // RECEIVE-PACKETS-DELTA
row[37] = p.TransmitBytesDelta // TRANSMIT-BYTES-DELTA
row[38] = fmt.Sprintf("%d", p.TransmitPacketsDelta) // TRANSMIT-PACKETS-DELTA
row[39] = fmt.Sprintf("%d", p.ReceiveBytesNumDelta) // RECEIVE-BYTES-NUM-DELTA
row[40] = fmt.Sprintf("%d", p.TransmitBytesNumDelta) // TRANSMIT-BYTES-NUM-DELTA
row[34] = fmt.Sprintf("%d", p.ReadsCompletedDelta) // READS-COMPLETED-DELTA
row[35] = fmt.Sprintf("%d", p.SectorsReadDelta) // SECTORS-READ-DELTA
row[36] = fmt.Sprintf("%d", p.WritesCompletedDelta) // WRITES-COMPLETED-DELTA
row[37] = fmt.Sprintf("%d", p.SectorsWrittenDelta) // SECTORS-WRITTEN-DELTA
row[41] = string(p.Extra) // EXTRA
row[38] = p.ReceiveBytesDelta // RECEIVE-BYTES-DELTA
row[39] = fmt.Sprintf("%d", p.ReceivePacketsDelta) // RECEIVE-PACKETS-DELTA
row[40] = p.TransmitBytesDelta // TRANSMIT-BYTES-DELTA
row[41] = fmt.Sprintf("%d", p.TransmitPacketsDelta) // TRANSMIT-PACKETS-DELTA
row[42] = fmt.Sprintf("%d", p.ReceiveBytesNumDelta) // RECEIVE-BYTES-NUM-DELTA
row[43] = fmt.Sprintf("%d", p.TransmitBytesNumDelta) // TRANSMIT-BYTES-NUM-DELTA
row[44] = string(p.Extra) // EXTRA
return
}


@ -24,7 +24,9 @@ type CSV struct {
// ExtraPath contains extra information.
ExtraPath string
// Rows are sorted by unix seconds.
// Rows are sorted by unix time in nanoseconds.
// It's the number of nanoseconds (not seconds) elapsed
// since January 1, 1970 UTC.
Rows []Proc
}
@ -47,8 +49,9 @@ func NewCSV(fpath string, pid int64, diskDevice string, networkInterface string,
}
}
// Add is to be called periodically to add a row to CSV.
// It only appends to CSV. And it estimates empty rows by unix seconds.
// Add is called periodically to append a new entry to CSV; it only appends.
// If the data is used as a time series, make sure to handle missing timestamps in between,
// e.g. interpolate by estimating averages between the last row and the new row to be inserted.
func (c *CSV) Add() error {
cur, err := GetProc(
WithPID(c.PID),
@ -71,14 +74,12 @@ func (c *CSV) Add() error {
// compare with previous row before append
prev := c.Rows[len(c.Rows)-1]
if prev.UnixTS >= cur.UnixTS {
// ignore data with wrong seconds
return nil
return fmt.Errorf("clock went backwards: got %v, but expected more than %v", cur.UnixTS, prev.UnixTS)
}
// 'Add' only appends, so the newest row holds the maximum unix time
c.MaxUnixTS = cur.UnixTS
if cur.UnixTS-prev.UnixTS == 1 {
cur.ReadsCompletedDelta = cur.DSEntry.ReadsCompleted - prev.DSEntry.ReadsCompleted
cur.SectorsReadDelta = cur.DSEntry.SectorsRead - prev.DSEntry.SectorsRead
cur.WritesCompletedDelta = cur.DSEntry.WritesCompleted - prev.DSEntry.WritesCompleted
@ -94,85 +95,6 @@ func (c *CSV) Add() error {
c.Rows = append(c.Rows, cur)
return nil
}
// there are empty rows between; estimate and fill-in
tsDelta := cur.UnixTS - prev.UnixTS
nexts := make([]Proc, 0, tsDelta+1)
// estimate the previous ones based on 'prev' and 'cur'
mid := prev
// Extra; just use the previous value
mid.Extra = prev.Extra
// PSEntry; just use the average since some metrics might decrease
mid.PSEntry.FD = prev.PSEntry.FD + (cur.PSEntry.FD-prev.PSEntry.FD)/2
mid.PSEntry.Threads = prev.PSEntry.Threads + (cur.PSEntry.Threads-prev.PSEntry.Threads)/2
mid.PSEntry.CPUNum = prev.PSEntry.CPUNum + (cur.PSEntry.CPUNum-prev.PSEntry.CPUNum)/2
mid.PSEntry.VMRSSNum = prev.PSEntry.VMRSSNum + (cur.PSEntry.VMRSSNum-prev.PSEntry.VMRSSNum)/2
mid.PSEntry.VMSizeNum = prev.PSEntry.VMSizeNum + (cur.PSEntry.VMSizeNum-prev.PSEntry.VMSizeNum)/2
mid.PSEntry.CPU = fmt.Sprintf("%3.2f %%", mid.PSEntry.CPUNum)
mid.PSEntry.VMRSS = humanize.Bytes(mid.PSEntry.VMRSSNum)
mid.PSEntry.VMSize = humanize.Bytes(mid.PSEntry.VMSizeNum)
// DSEntry; calculate delta assuming that metrics are cumulative
mid.ReadsCompletedDelta = (cur.DSEntry.ReadsCompleted - prev.DSEntry.ReadsCompleted) / uint64(tsDelta)
mid.SectorsReadDelta = (cur.DSEntry.SectorsRead - prev.DSEntry.SectorsRead) / uint64(tsDelta)
mid.WritesCompletedDelta = (cur.DSEntry.WritesCompleted - prev.DSEntry.WritesCompleted) / uint64(tsDelta)
mid.SectorsWrittenDelta = (cur.DSEntry.SectorsWritten - prev.DSEntry.SectorsWritten) / uint64(tsDelta)
timeSpentOnReadingMsDelta := (cur.DSEntry.TimeSpentOnReadingMs - prev.DSEntry.TimeSpentOnReadingMs) / uint64(tsDelta)
timeSpentOnWritingMsDelta := (cur.DSEntry.TimeSpentOnWritingMs - prev.DSEntry.TimeSpentOnWritingMs) / uint64(tsDelta)
// NSEntry; calculate delta assuming that metrics are cumulative
mid.ReceiveBytesNumDelta = (cur.NSEntry.ReceiveBytesNum - prev.NSEntry.ReceiveBytesNum) / uint64(tsDelta)
mid.ReceiveBytesDelta = humanize.Bytes(mid.ReceiveBytesNumDelta)
mid.ReceivePacketsDelta = (cur.NSEntry.ReceivePackets - prev.NSEntry.ReceivePackets) / uint64(tsDelta)
mid.TransmitBytesNumDelta = (cur.NSEntry.TransmitBytesNum - prev.NSEntry.TransmitBytesNum) / uint64(tsDelta)
mid.TransmitBytesDelta = humanize.Bytes(mid.TransmitBytesNumDelta)
mid.TransmitPacketsDelta = (cur.NSEntry.TransmitPackets - prev.NSEntry.TransmitPackets) / uint64(tsDelta)
for i := int64(1); i < tsDelta; i++ {
ev := mid
ev.UnixTS = prev.UnixTS + i
ev.DSEntry.ReadsCompleted += mid.ReadsCompletedDelta * uint64(i)
ev.DSEntry.SectorsRead += mid.SectorsReadDelta * uint64(i)
ev.DSEntry.WritesCompleted += mid.WritesCompletedDelta * uint64(i)
ev.DSEntry.SectorsWritten += mid.SectorsWrittenDelta * uint64(i)
ev.DSEntry.TimeSpentOnReadingMs += timeSpentOnReadingMsDelta * uint64(i)
ev.DSEntry.TimeSpentOnWritingMs += timeSpentOnWritingMsDelta * uint64(i)
ev.DSEntry.TimeSpentOnReading = humanizeDurationMs(ev.DSEntry.TimeSpentOnReadingMs)
ev.DSEntry.TimeSpentOnWriting = humanizeDurationMs(ev.DSEntry.TimeSpentOnWritingMs)
ev.NSEntry.ReceiveBytesNum += mid.ReceiveBytesNumDelta * uint64(i)
ev.NSEntry.ReceiveBytes = humanize.Bytes(ev.NSEntry.ReceiveBytesNum)
ev.NSEntry.ReceivePackets += mid.ReceivePacketsDelta * uint64(i)
ev.NSEntry.TransmitBytesNum += mid.TransmitBytesNumDelta * uint64(i)
ev.NSEntry.TransmitBytes = humanize.Bytes(ev.NSEntry.TransmitBytesNum)
ev.NSEntry.TransmitPackets += mid.TransmitPacketsDelta * uint64(i)
nexts = append(nexts, ev)
}
// now previous entry is estimated; update 'cur' Delta metrics
realPrev := nexts[len(nexts)-1]
cur.ReadsCompletedDelta = cur.DSEntry.ReadsCompleted - realPrev.DSEntry.ReadsCompleted
cur.SectorsReadDelta = cur.DSEntry.SectorsRead - realPrev.DSEntry.SectorsRead
cur.WritesCompletedDelta = cur.DSEntry.WritesCompleted - realPrev.DSEntry.WritesCompleted
cur.SectorsWrittenDelta = cur.DSEntry.SectorsWritten - realPrev.DSEntry.SectorsWritten
cur.ReceiveBytesNumDelta = cur.NSEntry.ReceiveBytesNum - realPrev.NSEntry.ReceiveBytesNum
cur.TransmitBytesNumDelta = cur.NSEntry.TransmitBytesNum - realPrev.NSEntry.TransmitBytesNum
cur.ReceivePacketsDelta = cur.NSEntry.ReceivePackets - realPrev.NSEntry.ReceivePackets
cur.TransmitPacketsDelta = cur.NSEntry.TransmitPackets - realPrev.NSEntry.TransmitPackets
cur.ReceiveBytesDelta = humanize.Bytes(cur.ReceiveBytesNumDelta)
cur.TransmitBytesDelta = humanize.Bytes(cur.TransmitBytesNumDelta)
c.Rows = append(c.Rows, append(nexts, cur)...)
return nil
}
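// The per-interval interpolation that used to happen here is now left to
// the caller. Assuming cumulative counters, a minimal linear fill between
// two samples could look like this (illustrative helper, not part of psn):
//
//	func interpolateCounter(prev, cur, steps uint64) []uint64 {
//		out := make([]uint64, steps)
//		delta := (cur - prev) / steps
//		for i := range out {
//			out[i] = prev + delta*uint64(i+1)
//		}
//		return out
//	}
//
//	// interpolateCounter(100, 160, 3) -> [120 140 160]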
// Save saves CSV to disk.
@ -201,6 +123,7 @@ func (c *CSV) Save() error {
}
// ReadCSV reads a CSV file and convert to 'CSV'.
// Make sure to change this whenever 'Proc' fields are updated.
func ReadCSV(fpath string) (*CSV, error) {
f, err := openToRead(fpath)
if err != nil {
@ -289,6 +212,19 @@ func ReadCSV(fpath string) (*CSV, error) {
return nil, err
}
loadAvg1min, err := strconv.ParseFloat(row[ProcHeaderIndex["LOAD-AVERAGE-1-MINUTE"]], 64)
if err != nil {
return nil, err
}
loadAvg5min, err := strconv.ParseFloat(row[ProcHeaderIndex["LOAD-AVERAGE-5-MINUTE"]], 64)
if err != nil {
return nil, err
}
loadAvg15min, err := strconv.ParseFloat(row[ProcHeaderIndex["LOAD-AVERAGE-15-MINUTE"]], 64)
if err != nil {
return nil, err
}
readsCompleted, err := strconv.ParseUint(row[ProcHeaderIndex["READS-COMPLETED"]], 10, 64)
if err != nil {
return nil, err
@ -384,6 +320,12 @@ func ReadCSV(fpath string) (*CSV, error) {
VMSizeNum: vmSizeNum,
},
LoadAvg: LoadAvg{
LoadAvg1Minute: loadAvg1min,
LoadAvg5Minute: loadAvg5min,
LoadAvg15Minute: loadAvg15min,
},
DSEntry: DSEntry{
Device: row[ProcHeaderIndex["DEVICE"]],
ReadsCompleted: readsCompleted,

vendor/github.com/gyuho/psn/proc_loadavg_linux.go generated vendored Normal file

@ -0,0 +1,93 @@
package psn
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
type procLoadAvgColumnIndex int
const (
proc_loadavg_idx_load_avg_1_minute procLoadAvgColumnIndex = iota
proc_loadavg_idx_load_avg_5_minute
proc_loadavg_idx_load_avg_15_minute
proc_loadavg_idx_kernel_scheduling_entities_with_slash
proc_loadavg_idx_pid
)
// GetProcLoadAvg reads '/proc/loadavg'.
// Expected output is '0.37 0.47 0.39 1/839 31397'.
func GetProcLoadAvg() (LoadAvg, error) {
txt, err := readProcLoadAvg()
if err != nil {
return LoadAvg{}, err
}
return getProcLoadAvg(txt)
}
func readProcLoadAvg() (string, error) {
f, err := openToRead("/proc/loadavg")
if err != nil {
return "", err
}
defer f.Close()
bts, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
return strings.TrimSpace(string(bts)), nil
}
func getProcLoadAvg(txt string) (LoadAvg, error) {
ds := strings.Fields(txt)
if len(ds) < 5 {
return LoadAvg{}, fmt.Errorf("not enough columns at %v", ds)
}
lavg := LoadAvg{}
avg1, err := strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_1_minute], 64)
if err != nil {
return LoadAvg{}, err
}
lavg.LoadAvg1Minute = avg1
avg5, err := strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_5_minute], 64)
if err != nil {
return LoadAvg{}, err
}
lavg.LoadAvg5Minute = avg5
avg15, err := strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_15_minute], 64)
if err != nil {
return LoadAvg{}, err
}
lavg.LoadAvg15Minute = avg15
slashed := strings.Split(ds[proc_loadavg_idx_kernel_scheduling_entities_with_slash], "/")
if len(slashed) != 2 {
return LoadAvg{}, fmt.Errorf("expected '/' string in kernel scheduling entities field, got %v", slashed)
}
s1, err := strconv.ParseInt(slashed[0], 10, 64)
if err != nil {
return LoadAvg{}, err
}
lavg.RunnableKernelSchedulingEntities = s1
s2, err := strconv.ParseInt(slashed[1], 10, 64)
if err != nil {
return LoadAvg{}, err
}
lavg.CurrentKernelSchedulingEntities = s2
pid, err := strconv.ParseInt(ds[proc_loadavg_idx_pid], 10, 64)
if err != nil {
return LoadAvg{}, err
}
lavg.Pid = pid
return lavg, nil
}
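A usage sketch for the new loadavg reader; the values in the comment correspond to the sample line documented above ('0.37 0.47 0.39 1/839 31397'):

package main

import (
    "fmt"
    "log"

    "github.com/gyuho/psn"
)

func main() {
    // Linux-only: reads '/proc/loadavg'. For the sample line above this yields
    // LoadAvg1Minute=0.37, RunnableKernelSchedulingEntities=1,
    // CurrentKernelSchedulingEntities=839, Pid=31397.
    lavg, err := psn.GetProcLoadAvg()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("1m=%.2f 5m=%.2f 15m=%.2f\n",
        lavg.LoadAvg1Minute, lavg.LoadAvg5Minute, lavg.LoadAvg15Minute)
}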


@ -111,7 +111,7 @@ func parseProcStat(pid int64, up Uptime) (Stat, error) {
hF := s.FieldByName(column + "ParsedStatus")
if hF.IsValid() {
if hF.CanSet() {
hF.SetString(strings.TrimSpace(fv))
hF.SetString(convertProcStatus(fv))
}
}
}
@ -138,43 +138,15 @@ func (s *Stat) update(up Uptime) (Stat, error) {
if strings.HasSuffix(s.Comm, ")") {
s.Comm = s.Comm[:len(s.Comm)-1]
}
cu, err := s.getCPU(up)
if err != nil {
return Stat{}, err
}
s.CpuUsage = cu
return *s, nil
}
// getCPU returns the average CPU usage in percentage.
// http://stackoverflow.com/questions/16726779/how-do-i-get-the-total-cpu-usage-of-an-application-from-proc-pid-stat
func (s Stat) getCPU(up Uptime) (float64, error) {
totalSec := s.Utime + s.Stime
totalSec += s.Cutime + s.Cstime
out, err := exec.Command("/usr/bin/getconf", "CLK_TCK").Output()
if err != nil {
return 0, err
}
ot := strings.TrimSpace(strings.Replace(string(out), "\n", "", -1))
hertz, err := strconv.ParseUint(ot, 10, 64)
if err != nil || hertz == 0 {
return 0, err
}
tookSec := up.UptimeTotal - (float64(s.Starttime) / float64(hertz))
if hertz == 0 || tookSec == 0.0 {
return 0.0, nil
}
return 100 * ((float64(totalSec) / float64(hertz)) / float64(tookSec)), nil
}
const statTmpl = `
----------------------------------------
[/proc/{{.Pid}}/stat]
Name: {{.Comm}}
State: {{.State}}
State: {{.StateParsedStatus}}
Pid: {{.Pid}}
Ppid: {{.Ppid}}
@ -183,7 +155,6 @@ NumThreads: {{.NumThreads}}
Rss: {{.RssParsedBytes}} ({{.RssBytesN}})
Rsslim: {{.RsslimParsedBytes}} ({{.RsslimBytesN}})
Vsize: {{.VsizeParsedBytes}} ({{.VsizeBytesN}})
CpuUsage: {{.CpuUsage}} %
Starttime: {{.Starttime}}
Utime: {{.Utime}}
@ -246,3 +217,28 @@ func (s Stat) String() string {
}
return buf.String()
}
// GetCPUPercentage returns the average CPU usage in percentage.
// http://stackoverflow.com/questions/16726779/how-do-i-get-the-total-cpu-usage-of-an-application-from-proc-pid-stat
// This sometimes differs from the value reported by the 'top' command,
// so do not use it!
func (s Stat) GetCPUPercentage(up Uptime) (float64, error) {
totalSec := s.Utime + s.Stime
totalSec += s.Cutime + s.Cstime
out, err := exec.Command("/usr/bin/getconf", "CLK_TCK").Output()
if err != nil {
return 0, err
}
ot := strings.TrimSpace(strings.Replace(string(out), "\n", "", -1))
hertz, err := strconv.ParseUint(ot, 10, 64)
if err != nil || hertz == 0 {
return 0, err
}
tookSec := up.UptimeTotal - (float64(s.Starttime) / float64(hertz))
if hertz == 0 || tookSec == 0.0 {
return 0.0, nil
}
return 100 * ((float64(totalSec) / float64(hertz)) / float64(tookSec)), nil
}


@ -8,6 +8,8 @@ type RawDataType int
const (
TypeBytes RawDataType = iota
TypeInt64
TypeFloat64
TypeTimeMicroseconds
TypeTimeSeconds
TypeIPAddress
@ -87,6 +89,47 @@ var NetTCP = RawData{
},
}
// TopCommandRow represents a row in 'top' command output.
// (See http://man7.org/linux/man-pages/man1/top.1.html).
var TopCommandRow = RawData{
IsYAML: false,
Columns: []Column{
{"PID", "pid of the process", reflect.Int64},
{"USER", "user name", reflect.String},
{"PR", "priority", reflect.String},
{"NI", "nice value of the task", reflect.String},
{"VIRT", "total amount of virtual memory used by the task (in KiB)", reflect.String},
{"RES", "non-swapped physical memory a task is using (in KiB)", reflect.String},
{"SHR", "amount of shared memory available to a task, not all of which is typically resident (in KiB)", reflect.String},
{"S", "process status", reflect.String},
{"CPUPercent", "%CPU", reflect.Float64},
{"MEMPercent", "%MEM", reflect.Float64},
{"TIME", "CPU time (TIME+)", reflect.String},
{"COMMAND", "command", reflect.String},
},
ColumnsToParse: map[string]RawDataType{
"S": TypeStatus,
"VIRT": TypeBytes,
"RES": TypeBytes,
"SHR": TypeBytes,
},
}
// LoadAvg represents '/proc/loadavg'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var LoadAvg = RawData{
IsYAML: false,
Columns: []Column{
{"load-avg-1-minute", "total uptime in seconds", reflect.Float64},
{"load-avg-5-minute", "total uptime in seconds", reflect.Float64},
{"load-avg-15-minute", "total uptime in seconds", reflect.Float64},
{"runnable-kernel-scheduling-entities", "number of currently runnable kernel scheduling entities (processes, threads)", reflect.Int64},
{"current-kernel-scheduling-entities", "number of kernel scheduling entities that currently exist on the system", reflect.Int64},
{"pid", "PID of the process that was most recently created on the system", reflect.Int64},
},
ColumnsToParse: map[string]RawDataType{},
}
// Uptime represents '/proc/uptime'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var Uptime = RawData{

vendor/github.com/gyuho/psn/top.go generated vendored Normal file

@ -0,0 +1,277 @@
package psn
import (
"bytes"
"fmt"
"io"
"os/exec"
"reflect"
"strconv"
"strings"
humanize "github.com/dustin/go-humanize"
)
// GetTop returns all entries in 'top' command output.
// If pid<1, it reads all processes.
func GetTop(topPath string, pid int64) ([]TopCommandRow, error) {
o, err := ReadTop(topPath, pid)
if err != nil {
return nil, err
}
return ParseTopOutput(o)
}
// GetTopDefault returns all entries in 'top' command output.
// If pid<1, it reads all processes.
func GetTopDefault(pid int64) ([]TopCommandRow, error) {
o, err := ReadTop(DefaultTopPath, pid)
if err != nil {
return nil, err
}
return ParseTopOutput(o)
}
// DefaultTopPath is the default 'top' command path.
var DefaultTopPath = "/usr/bin/top"
// ReadTopDefault reads Linux 'top' command output.
func ReadTopDefault(pid int64) (string, error) {
return ReadTop(DefaultTopPath, pid)
}
// ReadTop reads Linux 'top' command output.
func ReadTop(topPath string, pid int64) (string, error) {
buf := new(bytes.Buffer)
err := readTop(topPath, pid, buf)
o := strings.TrimSpace(buf.String())
return o, err
}
func readTop(topPath string, pid int64, w io.Writer) error {
if !exist(topPath) {
return fmt.Errorf("%q does not exist", topPath)
}
topFlags := []string{"-b", "-n", "1"}
if pid > 0 {
topFlags = append(topFlags, "-p", fmt.Sprint(pid))
}
cmd := exec.Command(topPath, topFlags...)
cmd.Stdout = w
cmd.Stderr = w
return cmd.Run()
}
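// For example, with topPath=DefaultTopPath and pid=1234 this runs:
//	/usr/bin/top -b -n 1 -p 1234
// and with pid<1 the '-p' flag is dropped so every process is captured.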
func convertProcStatus(s string) string {
ns := strings.TrimSpace(s)
if len(ns) > 1 {
ns = ns[:1]
}
switch ns {
case "D":
return "D (uninterruptible sleep)"
case "R":
return "R (running)"
case "S":
return "S (sleeping)"
case "T":
return "T (stopped by job control signal)"
case "t":
return "t (stopped by debugger during trace)"
case "Z":
return "Z (zombie)"
default:
return fmt.Sprintf("unknown process %q", s)
}
}
// parses KiB strings, returns bytes in uint64, and humanized bytes.
//
// KiB = kibibyte = 1024 bytes
// MiB = mebibyte = 1024 KiB = 1,048,576 bytes
// GiB = gibibyte = 1024 MiB = 1,073,741,824 bytes
// TiB = tebibyte = 1024 GiB = 1,099,511,627,776 bytes
// PiB = pebibyte = 1024 TiB = 1,125,899,906,842,624 bytes
// EiB = exbibyte = 1024 PiB = 1,152,921,504,606,846,976 bytes
//
func parseKiBInTop(s string) (bts uint64, hs string, err error) {
s = strings.TrimSpace(s)
switch {
// suffix 'm' means MiB
case strings.HasSuffix(s, "m"):
ns := s[:len(s)-1]
var mib float64
mib, err = strconv.ParseFloat(ns, 64)
if err != nil {
return 0, "", err
}
bts = uint64(mib * 1024 * 1024)
// suffix 'g' means GiB
case strings.HasSuffix(s, "g"):
ns := s[:len(s)-1]
var gib float64
gib, err = strconv.ParseFloat(ns, 64)
if err != nil {
return 0, "", err
}
bts = uint64(gib * 1024 * 1024 * 1024)
// no suffix means KiB
default:
var kib float64
kib, err = strconv.ParseFloat(s, 64)
if err != nil {
return 0, "", err
}
bts = uint64(kib * 1024)
}
hs = humanize.Bytes(bts)
return
}
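// Examples (humanize.Bytes output is SI, 1000-based):
//	parseKiBInTop("812") -> 831488, "831 kB", nil  (812 KiB)
//	parseKiBInTop("3m")  -> 3145728, "3.1 MB", nil (3 MiB)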
// TopRowHeaders is the list of column headers in 'top' output.
var TopRowHeaders = []string{
"PID",
"USER",
"PR",
"NI",
"VIRT",
"RES",
"SHR",
"S",
"%CPU",
"%MEM",
"TIME+",
"COMMAND",
}
type topCommandOutputRowIdx int
const (
top_command_output_row_idx_pid topCommandOutputRowIdx = iota
top_command_output_row_idx_user
top_command_output_row_idx_pr
top_command_output_row_idx_ni
top_command_output_row_idx_virt
top_command_output_row_idx_res
top_command_output_row_idx_shr
top_command_output_row_idx_s
top_command_output_row_idx_cpu
top_command_output_row_idx_mem
top_command_output_row_idx_time
top_command_output_row_idx_command
)
// ParseTopOutput parses 'top' command output and returns the rows.
func ParseTopOutput(s string) ([]TopCommandRow, error) {
lines := strings.Split(s, "\n")
rows := make([][]string, 0, len(lines))
headerFound := false
for _, line := range lines {
if len(line) == 0 {
continue
}
ds := strings.Fields(strings.TrimSpace(line))
if len(ds) == 0 {
continue
}
if ds[0] == "PID" { // header line
if !reflect.DeepEqual(ds, TopRowHeaders) {
return nil, fmt.Errorf("unexpected 'top' command header order (%v, expected %v)", ds, TopRowHeaders)
}
headerFound = true
continue
}
if !headerFound {
continue
}
row := strings.Fields(strings.TrimSpace(line))
if len(row) != len(TopRowHeaders) {
return nil, fmt.Errorf("unexpected row column number %v (expected %v)", row, TopRowHeaders)
}
rows = append(rows, row)
}
type result struct {
row TopCommandRow
err error
}
rc := make(chan result, len(rows))
for _, row := range rows {
go func(row []string) {
tr, err := parseTopRow(row)
rc <- result{row: tr, err: err}
}(row)
}
tcRows := make([]TopCommandRow, 0, len(rows))
for len(tcRows) != len(rows) {
rs := <-rc
if rs.err != nil {
return nil, rs.err
}
tcRows = append(tcRows, rs.row)
}
return tcRows, nil
}
func parseTopRow(row []string) (TopCommandRow, error) {
trow := TopCommandRow{
USER: strings.TrimSpace(row[top_command_output_row_idx_user]),
}
pv, err := strconv.ParseInt(row[top_command_output_row_idx_pid], 10, 64)
if err != nil {
return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.PID = pv
trow.PR = strings.TrimSpace(row[top_command_output_row_idx_pr])
trow.NI = strings.TrimSpace(row[top_command_output_row_idx_ni])
virt, virtTxt, err := parseKiBInTop(row[top_command_output_row_idx_virt])
if err != nil {
return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.VIRT = row[top_command_output_row_idx_virt]
trow.VIRTBytesN = virt
trow.VIRTParsedBytes = virtTxt
res, resTxt, err := parseKiBInTop(row[top_command_output_row_idx_res])
if err != nil {
return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.RES = row[top_command_output_row_idx_res]
trow.RESBytesN = res
trow.RESParsedBytes = resTxt
shr, shrTxt, err := parseKiBInTop(row[top_command_output_row_idx_shr])
if err != nil {
return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.SHR = row[top_command_output_row_idx_shr]
trow.SHRBytesN = shr
trow.SHRParsedBytes = shrTxt
trow.S = row[top_command_output_row_idx_s]
trow.SParsedStatus = convertProcStatus(row[top_command_output_row_idx_s])
cnum, err := strconv.ParseFloat(row[top_command_output_row_idx_cpu], 64)
if err != nil {
return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.CPUPercent = cnum
mnum, err := strconv.ParseFloat(row[top_command_output_row_idx_mem], 64)
if err != nil {
return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.MEMPercent = mnum
trow.TIME = row[top_command_output_row_idx_time]
return trow, nil
}
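A usage sketch tying the helpers above together; GetTopDefault shells out to DefaultTopPath with '-b -n 1', and pid 0 (pid<1) selects all processes:

package main

import (
    "fmt"
    "log"

    "github.com/gyuho/psn"
)

func main() {
    rows, err := psn.GetTopDefault(0) // pid<1 reads every process
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range rows {
        fmt.Printf("pid=%d cmd=%s cpu=%.1f%% mem=%.1f%% res=%s\n",
            r.PID, r.COMMAND, r.CPUPercent, r.MEMPercent, r.RESParsedBytes)
    }
}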

vendor/github.com/gyuho/psn/util.go generated vendored

@ -78,3 +78,22 @@ func homeDir() string {
}
return os.Getenv("HOME")
}
// exist returns true if the file or directory exists.
func exist(fpath string) bool {
_, err := os.Stat(fpath)
if err != nil {
// only a confirmed not-exist error means the path is absent;
// other errors (e.g. permission) do not prove non-existence
return !os.IsNotExist(err)
}
return true
}

File diff suppressed because it is too large


@ -72,80 +72,80 @@ func (m MountStatsNFS) mountStats() {}
// by an NFS client to and from an NFS server.
type NFSBytesStats struct {
// Number of bytes read using the read() syscall.
Read int
Read uint64
// Number of bytes written using the write() syscall.
Write int
Write uint64
// Number of bytes read using the read() syscall in O_DIRECT mode.
DirectRead int
DirectRead uint64
// Number of bytes written using the write() syscall in O_DIRECT mode.
DirectWrite int
DirectWrite uint64
// Number of bytes read from the NFS server, in total.
ReadTotal int
ReadTotal uint64
// Number of bytes written to the NFS server, in total.
WriteTotal int
WriteTotal uint64
// Number of pages read directly via mmap()'d files.
ReadPages int
ReadPages uint64
// Number of pages written directly via mmap()'d files.
WritePages int
WritePages uint64
}
// A NFSEventsStats contains statistics about NFS event occurrences.
type NFSEventsStats struct {
// Number of times cached inode attributes are re-validated from the server.
InodeRevalidate int
InodeRevalidate uint64
// Number of times cached dentry nodes are re-validated from the server.
DnodeRevalidate int
DnodeRevalidate uint64
// Number of times an inode cache is cleared.
DataInvalidate int
DataInvalidate uint64
// Number of times cached inode attributes are invalidated.
AttributeInvalidate int
AttributeInvalidate uint64
// Number of times files or directories have been open()'d.
VFSOpen int
VFSOpen uint64
// Number of times a directory lookup has occurred.
VFSLookup int
VFSLookup uint64
// Number of times permissions have been checked.
VFSAccess int
VFSAccess uint64
// Number of updates (and potential writes) to pages.
VFSUpdatePage int
VFSUpdatePage uint64
// Number of pages read directly via mmap()'d files.
VFSReadPage int
VFSReadPage uint64
// Number of times a group of pages have been read.
VFSReadPages int
VFSReadPages uint64
// Number of pages written directly via mmap()'d files.
VFSWritePage int
VFSWritePage uint64
// Number of times a group of pages have been written.
VFSWritePages int
VFSWritePages uint64
// Number of times directory entries have been read with getdents().
VFSGetdents int
VFSGetdents uint64
// Number of times attributes have been set on inodes.
VFSSetattr int
VFSSetattr uint64
// Number of pending writes that have been forcefully flushed to the server.
VFSFlush int
VFSFlush uint64
// Number of times fsync() has been called on directories and files.
VFSFsync int
VFSFsync uint64
// Number of times locking has been attempted on a file.
VFSLock int
VFSLock uint64
// Number of times files have been closed and released.
VFSFileRelease int
VFSFileRelease uint64
// Unknown. Possibly unused.
CongestionWait int
CongestionWait uint64
// Number of times files have been truncated.
Truncation int
Truncation uint64
// Number of times a file has been grown due to writes beyond its existing end.
WriteExtension int
WriteExtension uint64
// Number of times a file was removed while still open by another process.
SillyRename int
SillyRename uint64
// Number of times the NFS server gave less data than expected while reading.
ShortRead int
ShortRead uint64
// Number of times the NFS server wrote less data than expected while writing.
ShortWrite int
ShortWrite uint64
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
// offline storage.
JukeboxDelay int
JukeboxDelay uint64
// Number of NFS v4.1+ pNFS reads.
PNFSRead int
PNFSRead uint64
// Number of NFS v4.1+ pNFS writes.
PNFSWrite int
PNFSWrite uint64
}
// A NFSOperationStats contains statistics for a single operation.
@ -153,15 +153,15 @@ type NFSOperationStats struct {
// The name of the operation.
Operation string
// Number of requests performed for this operation.
Requests int
Requests uint64
// Number of times an actual RPC request has been transmitted for this operation.
Transmissions int
Transmissions uint64
// Number of times a request has had a major timeout.
MajorTimeouts int
MajorTimeouts uint64
// Number of bytes sent for this operation, including RPC headers and payload.
BytesSent int
BytesSent uint64
// Number of bytes received for this operation, including RPC headers and payload.
BytesReceived int
BytesReceived uint64
// Duration all requests spent queued for transmission before they were sent.
CumulativeQueueTime time.Duration
// Duration it took to get a reply back after the request was transmitted.
@ -174,41 +174,41 @@ type NFSOperationStats struct {
// responses.
type NFSTransportStats struct {
// The local port used for the NFS mount.
Port int
Port uint64
// Number of times the client has had to establish a connection from scratch
// to the NFS server.
Bind int
Bind uint64
// Number of times the client has made a TCP connection to the NFS server.
Connect int
Connect uint64
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
// spent waiting for connections to the server to be established.
ConnectIdleTime int
ConnectIdleTime uint64
// Duration since the NFS mount last saw any RPC traffic.
IdleTime time.Duration
// Number of RPC requests for this mount sent to the NFS server.
Sends int
Sends uint64
// Number of RPC responses for this mount received from the NFS server.
Receives int
Receives uint64
// Number of times the NFS server sent a response with a transaction ID
// unknown to this client.
BadTransactionIDs int
BadTransactionIDs uint64
// A running counter, incremented on each request as the current difference
// between sends and receives.
CumulativeActiveRequests int
CumulativeActiveRequests uint64
// A running counter, incremented on each request by the current backlog
// queue size.
CumulativeBacklog int
CumulativeBacklog uint64
// Stats below only available with stat version 1.1.
// Maximum number of simultaneously active RPC requests ever used.
MaximumRPCSlotsUsed int
MaximumRPCSlotsUsed uint64
// A running counter, incremented on each request as the current size of the
// sending queue.
CumulativeSendingQueue int
CumulativeSendingQueue uint64
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue int
CumulativePendingQueue uint64
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@ -386,9 +386,9 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
}
ns := make([]int, 0, fieldBytesLen)
ns := make([]uint64, 0, fieldBytesLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
@ -415,9 +415,9 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
}
ns := make([]int, 0, fieldEventsLen)
ns := make([]uint64, 0, fieldEventsLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
@ -480,9 +480,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
}
// Skip string operation name for integers
ns := make([]int, 0, numFields-1)
ns := make([]uint64, 0, numFields-1)
for _, st := range ss[1:] {
n, err := strconv.Atoi(st)
n, err := strconv.ParseUint(st, 10, 64)
if err != nil {
return nil, err
}
@ -524,9 +524,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
// in a v1.0 response
ns := make([]int, 0, fieldTransport11Len)
ns := make([]uint64, 0, fieldTransport11Len)
for _, s := range ss {
n, err := strconv.Atoi(s)
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
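The int-to-uint64 migration above matters because these kernel counters are unsigned 64-bit: strconv.Atoi rejects values beyond the signed-int range that strconv.ParseUint accepts. A quick sketch:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    s := "18446744073709551615" // max uint64; a cumulative counter can reach this
    if _, err := strconv.Atoi(s); err != nil {
        fmt.Println("Atoi:", err) // value out of range
    }
    n, err := strconv.ParseUint(s, 10, 64)
    fmt.Println(n, err) // 18446744073709551615 <nil>
}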