vendor: remove 'psn'

Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
Author: Gyu-Ho Lee
Date:   2017-05-09 21:52:44 -07:00
parent 4bdf7fc5e6
commit e44b39a988

51 changed files with 2413 additions and 2357 deletions
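
Review note (illustrative, not part of the commit): the flat 'psn' package is replaced by the 'linux-inspect' subpackages vendored below ('inspect', 'proc', 'top', 'df', 'pkg/fileutil', 'pkg/timeutil'), and call sites move from psn's flat API to package-qualified calls (for example GetProcLoadAvg becomes proc.GetLoadAvg, GetProcDiskstats becomes proc.GetDiskstats, FilterFunc/EntryFilter become OpFunc/EntryOp). A minimal before/after sketch of a caller, assuming the import paths listed in glide.yaml:

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/inspect"
	"github.com/gyuho/linux-inspect/proc"
)

func main() {
	// Before this commit: psn.GetPS(psn.WithPID(1)) and psn.GetProcLoadAvg().
	// After: the same calls live in the 'inspect' and 'proc' subpackages.
	entries, err := inspect.GetPS(inspect.WithPID(1))
	if err != nil {
		panic(err)
	}
	fmt.Println("ps entries:", len(entries))

	lavg, err := proc.GetLoadAvg()
	if err != nil {
		panic(err)
	}
	fmt.Printf("load average: %+v\n", lavg)
}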

glide.lock (generated, 8 lines changed)

@@ -1,5 +1,5 @@
-hash: 20541f5928e61e5615f373ea6dbcc8731e3cb81beebc1393aca7fae9a59f4850
+hash: 9c8365f44c1a3ee476773bcfcd63046fb10178ec73ac87ff39964a76e853bcd1
-updated: 2017-05-09T19:00:39.242844757-07:00
+updated: 2017-05-09T21:52:14.583085269-07:00
 imports:
 - name: bitbucket.org/zombiezen/gopdf
   version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
@@ -96,7 +96,7 @@ imports:
 - name: github.com/gyuho/dataframe
   version: 73de2c550b1177c1640f3dacbbc1af00f913fedb
 - name: github.com/gyuho/linux-inspect
-  version: a561d8f03516df4de161acbca6afbe8c54d1c833
+  version: 19adc517aa26b17ebaac4264445e116871ffef70
   subpackages:
   - df
   - etc
@@ -104,8 +104,6 @@ imports:
   - pkg/fileutil
   - pkg/timeutil
   - proc
-  - psn
-  - psn/schema
   - schema
   - top
 - name: github.com/hashicorp/consul

glide.yaml

@@ -56,7 +56,7 @@ import:
 - package: github.com/gyuho/dataframe
   version: 73de2c550b1177c1640f3dacbbc1af00f913fedb
 - package: github.com/gyuho/linux-inspect
-  version: a561d8f03516df4de161acbca6afbe8c54d1c833
+  version: 19adc517aa26b17ebaac4264445e116871ffef70
   subpackages:
   - df
   - etc
@@ -66,7 +66,6 @@ import:
   - proc
   - schema
   - top
-  - psn
 - package: github.com/hashicorp/consul
   version: 7fa2471ba75408865496d4f0c7db26d234fb71b8
   subpackages:

vendor/github.com/gyuho/linux-inspect/df/df.go (generated, vendored, new file, 262 lines)

@@ -0,0 +1,262 @@
package df
import (
"bytes"
"fmt"
"io"
"os/exec"
"path/filepath"
"reflect"
"strconv"
"strings"
"github.com/gyuho/linux-inspect/pkg/fileutil"
humanize "github.com/dustin/go-humanize"
)
// Get returns entries in 'df' command.
// Pass '' target to list all information.
func Get(dfPath string, target string) ([]Row, error) {
o, err := Read(dfPath, target)
if err != nil {
return nil, err
}
return Parse(o)
}
// GetDefault returns entries in 'df' command.
// Pass '' target to list all information.
func GetDefault(target string) ([]Row, error) {
o, err := Read(dfPath, target)
if err != nil {
return nil, err
}
return Parse(o)
}
// dfPath is the default 'df' command path.
const dfPath = "/bin/df"
// dfFlags is 'df --all --sync --block-size=1024 --output=source,target,fstype,file,itotal,iavail,iused,ipcent,size,avail,used,pcent'.
var dfFlags = []string{"--all", "--sync", "--block-size=1024", "--output=source,target,fstype,file,itotal,iavail,iused,ipcent,size,avail,used,pcent"}
// ReadDefault reads Linux 'df' command output.
// Pass '' target to list all information.
func ReadDefault(target string) (string, error) {
return Read(dfPath, target)
}
// Read reads Linux 'df' command output.
// Pass '' target to list all information.
func Read(dfPath string, target string) (string, error) {
buf := new(bytes.Buffer)
err := read(dfPath, target, buf)
o := strings.TrimSpace(buf.String())
return o, err
}
func read(dfPath string, target string, w io.Writer) error {
if !fileutil.Exist(dfPath) {
return fmt.Errorf("%q does not exist", dfPath)
}
if target != "" {
dfFlags = append(dfFlags, strings.TrimSpace(target))
}
cmd := exec.Command(dfPath, dfFlags...)
cmd.Stdout = w
cmd.Stderr = w
return cmd.Run()
}
// Headers is the headers in 'df' output.
var Headers = []string{
"Filesystem",
// Mounted on
"Mounted",
"on",
"Type",
"File",
"Inodes",
"IFree",
"IUsed",
"IUse%",
"1K-blocks",
"Avail",
"Used",
"Use%",
}
type commandOutpudrowIdx int
const (
command_output_row_idx_file_system commandOutpudrowIdx = iota
command_output_row_idx_mounted_on
command_output_row_idx_file_system_type
command_output_row_idx_file
command_output_row_idx_inodes
command_output_row_idx_ifree
command_output_row_idx_iused
command_output_row_idx_iused_percent
command_output_row_idx_total_blocks
command_output_row_idx_available_blocks
command_output_row_idx_used_blocks
command_output_row_idx_used_blocks_percentage
)
// Parse parses 'df' command output and returns the rows.
func Parse(s string) ([]Row, error) {
lines := strings.Split(s, "\n")
rows := make([][]string, 0, len(lines))
headerFound := false
for _, line := range lines {
if len(line) == 0 {
continue
}
ds := strings.Fields(strings.TrimSpace(line))
if ds[0] == "Filesystem" { // header line
if !reflect.DeepEqual(ds, Headers) {
return nil, fmt.Errorf("unexpected 'df' command header order (%v, expected %v, output: %q)", ds, Headers, s)
}
headerFound = true
continue
}
if !headerFound {
continue
}
row := strings.Fields(strings.TrimSpace(line))
if len(row) != len(Headers)-1 {
return nil, fmt.Errorf("unexpected row column number %v (expected %v)", row, Headers)
}
rows = append(rows, row)
}
type result struct {
row Row
err error
}
rc := make(chan result, len(rows))
for _, row := range rows {
go func(row []string) {
tr, err := parseRow(row)
rc <- result{row: tr, err: err}
}(row)
}
tcRows := make([]Row, 0, len(rows))
for len(tcRows) != len(rows) {
select {
case rs := <-rc:
if rs.err != nil {
return nil, rs.err
}
tcRows = append(tcRows, rs.row)
}
}
rm := make(map[string]Row)
for _, row := range tcRows {
rm[row.MountedOn] = row
}
rrs := make([]Row, 0, len(rm))
for _, row := range rm {
rrs = append(rrs, row)
}
return rrs, nil
}
func parseRow(row []string) (Row, error) {
drow := Row{
FileSystem: strings.TrimSpace(row[command_output_row_idx_file_system]),
MountedOn: strings.TrimSpace(row[command_output_row_idx_mounted_on]),
FileSystemType: strings.TrimSpace(row[command_output_row_idx_file_system_type]),
File: strings.TrimSpace(row[command_output_row_idx_file]),
IusedPercent: strings.TrimSpace(strings.Replace(row[command_output_row_idx_iused_percent], "%", " %", -1)),
UsedBlocksPercent: strings.TrimSpace(strings.Replace(row[command_output_row_idx_used_blocks_percentage], "%", " %", -1)),
}
drow.Device = filepath.Base(drow.FileSystem)
ptxt := strings.TrimSpace(row[command_output_row_idx_inodes])
if ptxt == "-" {
ptxt = "0"
}
iv, err := strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.Inodes = iv
ptxt = strings.TrimSpace(row[command_output_row_idx_ifree])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.Ifree = iv
ptxt = strings.TrimSpace(row[command_output_row_idx_iused])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.Iused = iv
ptxt = strings.TrimSpace(row[command_output_row_idx_total_blocks])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.TotalBlocks = iv
drow.TotalBlocksBytesN = iv * 1024
drow.TotalBlocksParsedBytes = humanize.Bytes(uint64(drow.TotalBlocksBytesN))
ptxt = strings.TrimSpace(row[command_output_row_idx_available_blocks])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.AvailableBlocks = iv
drow.AvailableBlocksBytesN = iv * 1024
drow.AvailableBlocksParsedBytes = humanize.Bytes(uint64(drow.AvailableBlocksBytesN))
ptxt = strings.TrimSpace(row[command_output_row_idx_used_blocks])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.UsedBlocks = iv
drow.UsedBlocksBytesN = iv * 1024
drow.UsedBlocksParsedBytes = humanize.Bytes(uint64(drow.UsedBlocksBytesN))
return drow, nil
}
// GetDevice returns the device name where dir is mounted.
func GetDevice(target string) (string, error) {
drows, err := GetDefault(target)
if err != nil {
return "", err
}
if len(drows) != 1 {
return "", fmt.Errorf("expected 1 df row at %q (got %+v)", target, drows)
}
return drows[0].Device, nil
}
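
Review note (illustrative, not part of the vendored file): a short usage sketch of the API above. The target path is a placeholder and '/bin/df' must exist on the host.

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/df"
)

func main() {
	// List the mount(s) backing a (placeholder) data directory.
	rows, err := df.GetDefault("/var/lib/dbtester") // runs '/bin/df' with dfFlags
	if err != nil {
		panic(err)
	}
	for _, r := range rows {
		fmt.Printf("%s on %s: %s used of %s\n",
			r.Device, r.MountedOn, r.UsedBlocksParsedBytes, r.TotalBlocksParsedBytes)
	}

	// GetDevice expects exactly one matching row for the target.
	dev, err := df.GetDevice("/var/lib/dbtester")
	if err != nil {
		panic(err)
	}
	fmt.Println("device:", dev)
}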

vendor/github.com/gyuho/linux-inspect/df/doc.go (generated, vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
// Package df wraps Unix 'df' command.
// Reference https://en.wikipedia.org/wiki/Df_(Unix).
package df

vendor/github.com/gyuho/linux-inspect/df/generated.go (generated, vendored, new file, 39 lines)

@@ -0,0 +1,39 @@
package df
// updated at 2017-05-09 15:51:39.458012515 -0700 PDT
// Row is 'df' command output row in Linux.
type Row struct {
// FileSystem is file system ('source').
FileSystem string `column:"file_system"`
// Device is device name.
Device string `column:"device"`
// MountedOn is 'mounted on' ('target').
MountedOn string `column:"mounted_on"`
// FileSystemType is file system type ('fstype').
FileSystemType string `column:"file_system_type"`
// File is file name if specified on the command line ('file').
File string `column:"file"`
// Inodes is total number of inodes ('itotal').
Inodes int64 `column:"inodes"`
// Ifree is number of available inodes ('iavail').
Ifree int64 `column:"ifree"`
// Iused is number of used inodes ('iused').
Iused int64 `column:"iused"`
// IusedPercent is percentage of iused divided by itotal ('ipcent').
IusedPercent string `column:"iused_percent"`
// TotalBlocks is total number of 1K-blocks ('size').
TotalBlocks int64 `column:"total_blocks"`
TotalBlocksBytesN int64 `column:"total_blocks_bytes_n"`
TotalBlocksParsedBytes string `column:"total_blocks_parsed_bytes"`
// AvailableBlocks is number of available 1K-blocks ('avail').
AvailableBlocks int64 `column:"available_blocks"`
AvailableBlocksBytesN int64 `column:"available_blocks_bytes_n"`
AvailableBlocksParsedBytes string `column:"available_blocks_parsed_bytes"`
// UsedBlocks is number of used 1K-blocks ('used').
UsedBlocks int64 `column:"used_blocks"`
UsedBlocksBytesN int64 `column:"used_blocks_bytes_n"`
UsedBlocksParsedBytes string `column:"used_blocks_parsed_bytes"`
// UsedBlocksPercent is percentage of used-blocks divided by total-blocks ('pcent').
UsedBlocksPercent string `column:"used_blocks_percent"`
}

vendor/github.com/gyuho/linux-inspect/df/schema.go (generated, vendored, new file, 38 lines)

@@ -0,0 +1,38 @@
package df
import (
"reflect"
"github.com/gyuho/linux-inspect/schema"
)
// RowSchema represents 'df' command output row
// (See https://en.wikipedia.org/wiki/Df_(Unix)
// and https://www.gnu.org/software/coreutils/manual/html_node/df-invocation.html
// and 'df --all --sync --block-size=1024 --output=source,target,fstype,file,itotal,iavail,iused,ipcent,size,avail,used,pcent'
// and the output unit is kilobytes).
var RowSchema = schema.RawData{
IsYAML: false,
Columns: []schema.Column{
{Name: "file-system", Godoc: "file system ('source')", Kind: reflect.String},
{Name: "device", Godoc: "device name", Kind: reflect.String},
{Name: "mounted-on", Godoc: "'mounted on' ('target')", Kind: reflect.String},
{Name: "file-system-type", Godoc: "file system type ('fstype')", Kind: reflect.String},
{Name: "file", Godoc: "file name if specified on the command line ('file')", Kind: reflect.String},
{Name: "inodes", Godoc: "total number of inodes ('itotal')", Kind: reflect.Int64},
{Name: "ifree", Godoc: "number of available inodes ('iavail')", Kind: reflect.Int64},
{Name: "iused", Godoc: "number of used inodes ('iused')", Kind: reflect.Int64},
{Name: "iused-percent", Godoc: "percentage of iused divided by itotal ('ipcent')", Kind: reflect.String},
{Name: "total-blocks", Godoc: "total number of 1K-blocks ('size')", Kind: reflect.Int64},
{Name: "available-blocks", Godoc: "number of available 1K-blocks ('avail')", Kind: reflect.Int64},
{Name: "used-blocks", Godoc: "number of used 1K-blocks ('used')", Kind: reflect.Int64},
{Name: "used-blocks-percent", Godoc: "percentage of used-blocks divided by total-blocks ('pcent')", Kind: reflect.String},
},
ColumnsToParse: map[string]schema.RawDataType{
"total-blocks": schema.TypeBytes,
"available-blocks": schema.TypeBytes,
"used-blocks": schema.TypeBytes,
},
}


@@ -1,4 +1,4 @@
-package psn
+package inspect
import (
"math"

vendor/github.com/gyuho/linux-inspect/inspect/doc.go (generated, vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
// Package inspect inspects '/proc/*'.
package inspect


@@ -1,9 +1,11 @@
-package psn
+package inspect
import (
"bytes"
"fmt"
+"github.com/gyuho/linux-inspect/proc"
"github.com/gyuho/dataframe"
"github.com/olekukonko/tablewriter"
)
@@ -28,7 +30,7 @@ type DSEntry struct {
// GetDS lists all disk statistics.
func GetDS() ([]DSEntry, error) {
-ss, err := GetProcDiskstats()
+ss, err := proc.GetDiskstats()
if err != nil {
return nil, err
}


@@ -1,9 +1,11 @@
-package psn
+package inspect
import (
"bytes"
"fmt"
+"github.com/gyuho/linux-inspect/proc"
"github.com/gyuho/dataframe"
"github.com/olekukonko/tablewriter"
)
@@ -25,7 +27,7 @@ type NSEntry struct {
// GetNS lists all '/proc/net/dev' statistics.
func GetNS() ([]NSEntry, error) {
-ss, err := GetProcNetDev()
+ss, err := proc.GetNetDev()
if err != nil {
return nil, err
}

vendor/github.com/gyuho/linux-inspect/inspect/op.go (generated, vendored, new file, 134 lines)

@@ -0,0 +1,134 @@
package inspect
import (
"fmt"
"strings"
"github.com/gyuho/linux-inspect/top"
)
// EntryOp defines entry option(filter).
type EntryOp struct {
ProgramMatchFunc func(string) bool
program string
PID int64
TopLimit int
// for ss
TCP bool
TCP6 bool
LocalPort int64
RemotePort int64
// for ps
TopExecPath string
TopStream *top.Stream
// for Proc
DiskDevice string
NetworkInterface string
ExtraPath string
}
// OpFunc applies each filter.
type OpFunc func(*EntryOp)
// WithProgramMatch matches command name.
func WithProgramMatch(matchFunc func(string) bool) OpFunc {
return func(op *EntryOp) { op.ProgramMatchFunc = matchFunc }
}
// WithProgram to filter entries by program name.
func WithProgram(name string) OpFunc {
return func(op *EntryOp) {
op.ProgramMatchFunc = func(commandName string) bool {
return strings.HasSuffix(commandName, name)
}
op.program = name
}
}
// WithPID to filter entries by PIDs.
func WithPID(pid int64) OpFunc {
return func(op *EntryOp) { op.PID = pid }
}
// WithTopLimit to filter entries with limit.
func WithTopLimit(limit int) OpFunc {
return func(op *EntryOp) { op.TopLimit = limit }
}
// WithLocalPort to filter entries by local port.
func WithLocalPort(port int64) OpFunc {
return func(op *EntryOp) { op.LocalPort = port }
}
// WithRemotePort to filter entries by remote port.
func WithRemotePort(port int64) OpFunc {
return func(op *EntryOp) { op.RemotePort = port }
}
// WithTCP to filter entries by TCP.
// Can be used with 'WithTCP6'.
func WithTCP() OpFunc {
return func(op *EntryOp) { op.TCP = true }
}
// WithTCP6 to filter entries by TCP6.
// Can be used with 'WithTCP'.
func WithTCP6() OpFunc {
return func(op *EntryOp) { op.TCP6 = true }
}
// WithTopExecPath configures 'top' command path.
func WithTopExecPath(path string) OpFunc {
return func(op *EntryOp) { op.TopExecPath = path }
}
// WithTopStream gets the PSEntry from the 'top' stream.
func WithTopStream(str *top.Stream) OpFunc {
return func(op *EntryOp) { op.TopStream = str }
}
// WithDiskDevice to filter entries by disk device.
func WithDiskDevice(name string) OpFunc {
return func(op *EntryOp) { op.DiskDevice = name }
}
// WithNetworkInterface to filter entries by disk device.
func WithNetworkInterface(name string) OpFunc {
return func(op *EntryOp) { op.NetworkInterface = name }
}
// WithExtraPath to filter entries by disk device.
func WithExtraPath(path string) OpFunc {
return func(op *EntryOp) { op.ExtraPath = path }
}
// applyOpts panics when op.Program != "" && op.PID > 0.
func (op *EntryOp) applyOpts(opts []OpFunc) {
for _, of := range opts {
of(op)
}
if op.DiskDevice != "" || op.NetworkInterface != "" || op.ExtraPath != "" {
if (op.program != "" || op.ProgramMatchFunc != nil) || op.TopLimit > 0 || op.LocalPort > 0 || op.RemotePort > 0 || op.TCP || op.TCP6 {
panic(fmt.Errorf("not-valid Proc fileter; disk device %q or network interface %q or extra path %q", op.DiskDevice, op.NetworkInterface, op.ExtraPath))
}
}
if (op.program != "" || op.ProgramMatchFunc != nil) && op.PID > 0 {
panic(fmt.Errorf("can't filter both by program(%q or %p) and PID(%d)", op.program, op.ProgramMatchFunc, op.PID))
}
if !op.TCP && !op.TCP6 {
// choose both
op.TCP, op.TCP6 = true, true
}
if op.LocalPort > 0 && op.RemotePort > 0 {
panic(fmt.Errorf("can't query by both local(%d) and remote(%d) ports", op.LocalPort, op.RemotePort))
}
if op.TopExecPath == "" {
op.TopExecPath = top.DefaultExecPath
}
}
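
Review note (illustrative, not part of the vendored file): the options above are consumed by GetProc/GetPS/GetSS in this package. A minimal sketch of how a caller combines them; the PID, disk device, and network interface are placeholders.

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/inspect"
)

func main() {
	// PID is required by GetProc; disk device and network interface are optional.
	p, err := inspect.GetProc(
		inspect.WithPID(1),                   // placeholder PID
		inspect.WithDiskDevice("sda"),        // placeholder device name
		inspect.WithNetworkInterface("eth0"), // placeholder interface name
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("ps: %+v\n", p.PSEntry)
	fmt.Printf("load: %+v\n", p.LoadAvg)
}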


@@ -1,9 +1,12 @@
-package psn
+package inspect
import (
"fmt"
"io/ioutil"
"time"
+"github.com/gyuho/linux-inspect/pkg/fileutil"
+"github.com/gyuho/linux-inspect/proc"
)
// Proc represents an entry of various system statistics.
@@ -16,7 +19,7 @@ type Proc struct {
PSEntry PSEntry
-LoadAvg LoadAvg
+LoadAvg proc.LoadAvg
DSEntry DSEntry
ReadsCompletedDelta uint64
@@ -61,18 +64,23 @@ func (p ProcSlice) Less(i, j int) bool {
return p[i].UnixSecond < p[j].UnixSecond
}
+// nanoToUnix converts unix nanoseconds to unix second.
+func nanoToUnix(unixNano int64) (unixSec int64) {
+return int64(unixNano / 1e9)
+}
// GetProc returns current 'Proc' data.
// PID is required.
// Disk device, network interface, extra path are optional.
-func GetProc(opts ...FilterFunc) (Proc, error) {
+func GetProc(opts ...OpFunc) (Proc, error) {
-ft := &EntryFilter{}
+op := &EntryOp{}
-ft.applyOpts(opts)
+op.applyOpts(opts)
-if ft.PID == 0 {
+if op.PID == 0 {
-return Proc{}, fmt.Errorf("unknown PID %d", ft.PID)
+return Proc{}, fmt.Errorf("unknown PID %d", op.PID)
}
ts := time.Now().UnixNano()
-proc := Proc{UnixNanosecond: ts, UnixSecond: ConvertUnixNano(ts)}
+pc := Proc{UnixNanosecond: ts, UnixSecond: nanoToUnix(ts)}
toFinish := 0
@@ -80,31 +88,31 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
toFinish++
go func() {
// get process stats
-ets, err := GetPS(WithPID(ft.PID), WithTopStream(ft.TopStream))
+ets, err := GetPS(WithPID(op.PID), WithTopStream(op.TopStream))
if err != nil {
errc <- err
return
}
if len(ets) != 1 {
-errc <- fmt.Errorf("len(PID=%d entries) != 1 (got %d)", ft.PID, len(ets))
+errc <- fmt.Errorf("len(PID=%d entries) != 1 (got %d)", op.PID, len(ets))
return
}
-proc.PSEntry = ets[0]
+pc.PSEntry = ets[0]
errc <- nil
}()
toFinish++
go func() {
-lvg, err := GetProcLoadAvg()
+lvg, err := proc.GetLoadAvg()
if err != nil {
errc <- err
return
}
-proc.LoadAvg = lvg
+pc.LoadAvg = lvg
errc <- nil
}()
-if ft.DiskDevice != "" {
+if op.DiskDevice != "" {
toFinish++
go func() {
// get diskstats
@@ -114,8 +122,8 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
return
}
for _, elem := range ds {
-if elem.Device == ft.DiskDevice {
+if elem.Device == op.DiskDevice {
-proc.DSEntry = elem
+pc.DSEntry = elem
break
}
}
@@ -123,7 +131,7 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
}()
}
-if ft.NetworkInterface != "" {
+if op.NetworkInterface != "" {
toFinish++
go func() {
// get network I/O stats
@@ -133,8 +141,8 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
return
}
for _, elem := range ns {
-if elem.Interface == ft.NetworkInterface {
+if elem.Interface == op.NetworkInterface {
-proc.NSEntry = elem
+pc.NSEntry = elem
break
}
}
@@ -142,20 +150,21 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
}()
}
-if ft.ExtraPath != "" {
+if op.ExtraPath != "" {
toFinish++
go func() {
-f, err := openToRead(ft.ExtraPath)
+f, err := fileutil.OpenToRead(op.ExtraPath)
if err != nil {
errc <- err
return
}
+defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
errc <- err
return
}
-proc.Extra = b
+pc.Extra = b
errc <- nil
}()
}
@@ -169,17 +178,17 @@ func GetProc(opts ...FilterFunc) (Proc, error) {
cnt++
}
-if ft.DiskDevice != "" {
+if op.DiskDevice != "" {
-if proc.DSEntry.Device == "" {
+if pc.DSEntry.Device == "" {
-return Proc{}, fmt.Errorf("disk device %q was not found (%+v)", ft.DiskDevice, proc.DSEntry)
+return Proc{}, fmt.Errorf("disk device %q was not found (%+v)", op.DiskDevice, pc.DSEntry)
}
}
-if ft.NetworkInterface != "" {
+if op.NetworkInterface != "" {
-if proc.NSEntry.Interface == "" {
+if pc.NSEntry.Interface == "" {
-return Proc{}, fmt.Errorf("network interface %q was not found", ft.NetworkInterface)
+return Proc{}, fmt.Errorf("network interface %q was not found", op.NetworkInterface)
}
}
-return proc, nil
+return pc, nil
}
var (


@@ -1,4 +1,4 @@
-package psn
+package inspect
import (
"encoding/csv"
@@ -6,6 +6,10 @@ import (
"log"
"strconv"
+"github.com/gyuho/linux-inspect/pkg/fileutil"
+"github.com/gyuho/linux-inspect/proc"
+"github.com/gyuho/linux-inspect/top"
humanize "github.com/dustin/go-humanize"
)
@@ -30,7 +34,7 @@ type CSV struct {
// TopStream feeds realtime 'top' command data in the background, every second.
// And whenver 'Add' gets called, returns the latest 'top' data.
// Use this to provide more accurate CPU usage.
-TopStream *TopStream
+TopStream *top.Stream
// Rows are sorted by unix time in nanoseconds.
// It's the number of nanoseconds (not seconds) elapsed
@@ -39,7 +43,7 @@ type CSV struct {
}
// NewCSV returns a new CSV.
-func NewCSV(fpath string, pid int64, diskDevice string, networkInterface string, extraPath string, tcfg *TopConfig) (c *CSV, err error) {
+func NewCSV(fpath string, pid int64, diskDevice string, networkInterface string, extraPath string, tcfg *top.Config) (c *CSV, err error) {
c = &CSV{
FilePath: fpath,
PID: pid,
@@ -136,7 +140,7 @@ func (c *CSV) Save() error {
}
}
-f, err := openToAppend(c.FilePath)
+f, err := fileutil.OpenToAppend(c.FilePath)
if err != nil {
return err
}
@@ -162,7 +166,7 @@ func (c *CSV) Save() error {
// ReadCSV reads a CSV file and convert to 'CSV'.
// Make sure to change this whenever 'Proc' fields are updated.
func ReadCSV(fpath string) (*CSV, error) {
-f, err := openToRead(fpath)
+f, err := fileutil.OpenToRead(fpath)
if err != nil {
return nil, err
}
@@ -203,9 +207,9 @@ func ReadCSV(fpath string) (*CSV, error) {
Header: ProcHeader,
HeaderIndex: ProcHeaderIndex,
MinUnixNanosecond: min,
-MinUnixSecond: ConvertUnixNano(min),
+MinUnixSecond: nanoToUnix(min),
MaxUnixNanosecond: max,
-MaxUnixSecond: ConvertUnixNano(max),
+MaxUnixSecond: nanoToUnix(max),
Rows: make([]Proc, 0, len(rows)),
}
@@ -361,7 +365,7 @@ func ReadCSV(fpath string) (*CSV, error) {
return nil, err
}
-proc := Proc{
+pc := Proc{
UnixNanosecond: ts,
UnixSecond: tss,
@@ -382,7 +386,7 @@ func ReadCSV(fpath string) (*CSV, error) {
VMSizeNum: vmSizeNum,
},
-LoadAvg: LoadAvg{
+LoadAvg: proc.LoadAvg{
LoadAvg1Minute: loadAvg1min,
LoadAvg5Minute: loadAvg5min,
LoadAvg15Minute: loadAvg15min,
@@ -427,11 +431,11 @@ func ReadCSV(fpath string) (*CSV, error) {
Extra: []byte(row[ProcHeaderIndex["EXTRA"]]),
}
-c.PID = proc.PSEntry.PID
+c.PID = pc.PSEntry.PID
-c.DiskDevice = proc.DSEntry.Device
+c.DiskDevice = pc.DSEntry.Device
-c.NetworkInterface = proc.NSEntry.Interface
+c.NetworkInterface = pc.NSEntry.Interface
-c.Rows = append(c.Rows, proc)
+c.Rows = append(c.Rows, pc)
}
return c, nil


@@ -1,9 +1,11 @@
-package psn
+package inspect
import (
"fmt"
"sort"
+"github.com/gyuho/linux-inspect/pkg/timeutil"
humanize "github.com/dustin/go-humanize"
)
@@ -142,9 +144,9 @@ func Combine(procs ...Proc) Proc {
combined.DSEntry.WritesCompleted = uint64(writesCompleted) / uint64(pN)
combined.DSEntry.SectorsWritten = uint64(sectorsWritten) / uint64(pN)
combined.DSEntry.TimeSpentOnReadingMs = uint64(timeSpentOnReadingMs) / uint64(pN)
-combined.DSEntry.TimeSpentOnReading = humanizeDurationMs(combined.DSEntry.TimeSpentOnReadingMs)
+combined.DSEntry.TimeSpentOnReading = timeutil.HumanizeDurationMs(combined.DSEntry.TimeSpentOnReadingMs)
combined.DSEntry.TimeSpentOnWritingMs = uint64(timeSpentOnWritingMs) / uint64(pN)
-combined.DSEntry.TimeSpentOnWriting = humanizeDurationMs(combined.DSEntry.TimeSpentOnWritingMs)
+combined.DSEntry.TimeSpentOnWriting = timeutil.HumanizeDurationMs(combined.DSEntry.TimeSpentOnWritingMs)
combined.ReadsCompletedDelta = uint64(readsCompletedDelta) / uint64(pN)
combined.SectorsReadDelta = uint64(sectorsReadDelta) / uint64(pN)
combined.WritesCompletedDelta = uint64(writesCompletedDelta) / uint64(pN)
@@ -266,9 +268,9 @@ func Interpolate(lower, upper Proc) (procs []Proc, err error) {
procs[i].DSEntry.WritesCompleted = uint64(int64(lower.DSEntry.WritesCompleted) + int64(i+1)*writesCompleted)
procs[i].DSEntry.SectorsWritten = uint64(int64(lower.DSEntry.SectorsWritten) + int64(i+1)*sectorsWritten)
procs[i].DSEntry.TimeSpentOnReadingMs = uint64(int64(lower.DSEntry.TimeSpentOnReadingMs) + int64(i+1)*timeSpentOnReadingMs)
-procs[i].DSEntry.TimeSpentOnReading = humanizeDurationMs(procs[i].DSEntry.TimeSpentOnReadingMs)
+procs[i].DSEntry.TimeSpentOnReading = timeutil.HumanizeDurationMs(procs[i].DSEntry.TimeSpentOnReadingMs)
procs[i].DSEntry.TimeSpentOnWritingMs = uint64(int64(lower.DSEntry.TimeSpentOnWritingMs) + int64(i+1)*timeSpentOnWritingMs)
-procs[i].DSEntry.TimeSpentOnWriting = humanizeDurationMs(procs[i].DSEntry.TimeSpentOnWritingMs)
+procs[i].DSEntry.TimeSpentOnWriting = timeutil.HumanizeDurationMs(procs[i].DSEntry.TimeSpentOnWritingMs)
procs[i].ReadsCompletedDelta = uint64(int64(lower.ReadsCompletedDelta) + int64(i+1)*readsCompletedDelta)
procs[i].SectorsReadDelta = uint64(int64(lower.SectorsReadDelta) + int64(i+1)*sectorsReadDelta)
procs[i].WritesCompletedDelta = uint64(int64(lower.WritesCompletedDelta) + int64(i+1)*writesCompletedDelta)
@@ -463,8 +465,3 @@ func (c *CSV) Interpolate() (cc *CSV, err error) {
return
}
-// ConvertUnixNano unix nanoseconds to unix second.
-func ConvertUnixNano(unixNano int64) (unixSec int64) {
-return int64(unixNano / 1e9)
-}
}


@@ -1,4 +1,4 @@
-package psn
+package inspect
import (
"bytes"
@@ -6,6 +6,9 @@ import (
"log"
"sync"
+"github.com/gyuho/linux-inspect/proc"
+"github.com/gyuho/linux-inspect/top"
"github.com/gyuho/dataframe"
"github.com/olekukonko/tablewriter"
)
@@ -37,66 +40,66 @@ type PSEntry struct {
const maxConcurrentProcFDLimit = 32
// GetPS finds all PSEntry by given filter.
-func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
+func GetPS(opts ...OpFunc) (pss []PSEntry, err error) {
-ft := &EntryFilter{}
+op := &EntryOp{}
-ft.applyOpts(opts)
+op.applyOpts(opts)
var pids []int64
switch {
-case ft.ProgramMatchFunc == nil && ft.PID < 1:
+case op.ProgramMatchFunc == nil && op.PID < 1:
// get all PIDs
-pids, err = ListPIDs()
+pids, err = proc.ListPIDs()
if err != nil {
return
}
-case ft.PID > 0:
+case op.PID > 0:
-pids = []int64{ft.PID}
+pids = []int64{op.PID}
-case ft.ProgramMatchFunc != nil:
+case op.ProgramMatchFunc != nil:
// later to find PIDs by Program
pids = nil
default:
-// applyOpts already panic when ft.ProgramMatchFunc != nil && ft.PID > 0
+// applyOpts already panic when op.ProgramMatchFunc != nil && op.PID > 0
}
// can't filter both by program and by PID
if len(pids) == 0 {
// list all PIDs, or later to match by Program
-if pids, err = ListPIDs(); err != nil {
+if pids, err = proc.ListPIDs(); err != nil {
return
}
} else {
-ft.ProgramMatchFunc = func(string) bool { return true }
+op.ProgramMatchFunc = func(string) bool { return true }
}
-var topM map[int64]TopCommandRow
+var topM map[int64]top.Row
-if ft.TopStream == nil {
+if op.TopStream == nil {
-var topRows []TopCommandRow
+var topRows []top.Row
if len(pids) == 1 {
-topRows, err = GetTop(ft.TopCommandPath, pids[0])
+topRows, err = top.Get(op.TopExecPath, pids[0])
if err != nil {
return
}
} else {
-topRows, err = GetTop(ft.TopCommandPath, 0)
+topRows, err = top.Get(op.TopExecPath, 0)
if err != nil {
return
}
}
-topM = make(map[int64]TopCommandRow, len(topRows))
+topM = make(map[int64]top.Row, len(topRows))
for _, row := range topRows {
topM[row.PID] = row
}
for _, pid := range pids {
if _, ok := topM[pid]; !ok {
-topM[pid] = TopCommandRow{PID: pid}
+topM[pid] = top.Row{PID: pid}
log.Printf("PID %d is not found at 'top' command output", pid)
}
}
} else {
-topM = ft.TopStream.Latest()
+topM = op.TopStream.Latest()
}
var pmu sync.RWMutex
@@ -113,12 +116,12 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
limitc <- struct{}{}
topRow := topM[pid]
-if !ft.ProgramMatchFunc(topRow.COMMAND) {
+if !op.ProgramMatchFunc(topRow.COMMAND) {
return
}
pmu.RLock()
-done := ft.TopLimit > 0 && len(pss) >= ft.TopLimit
+done := op.TopLimit > 0 && len(pss) >= op.TopLimit
pmu.RUnlock()
if done {
return
@@ -137,14 +140,14 @@ func GetPS(opts ...FilterFunc) (pss []PSEntry, err error) {
}
wg.Wait()
-if ft.TopLimit > 0 && len(pss) > ft.TopLimit {
+if op.TopLimit > 0 && len(pss) > op.TopLimit {
-pss = pss[:ft.TopLimit:ft.TopLimit]
+pss = pss[:op.TopLimit:op.TopLimit]
}
return
}
-func getPSEntry(pid int64, topRow TopCommandRow) (PSEntry, error) {
+func getPSEntry(pid int64, topRow top.Row) (PSEntry, error) {
-status, err := GetProcStatusByPID(pid)
+status, err := proc.GetStatusByPID(pid)
if err != nil {
return PSEntry{}, err
}


@@ -1,4 +1,4 @@
-package psn
+package inspect
import (
"bytes"
@@ -7,6 +7,8 @@ import (
"os/user"
"sync"
+"github.com/gyuho/linux-inspect/proc"
"github.com/gyuho/dataframe"
"github.com/olekukonko/tablewriter"
)
@@ -30,15 +32,15 @@ type SSEntry struct {
}
// GetSS finds all SSEntry by given filter.
-func GetSS(opts ...FilterFunc) (sss []SSEntry, err error) {
+func GetSS(opts ...OpFunc) (sss []SSEntry, err error) {
-ft := &EntryFilter{}
+ft := &EntryOp{}
ft.applyOpts(opts)
var pids []int64
switch {
case ft.ProgramMatchFunc == nil && ft.PID < 1:
// get all PIDs
-pids, err = ListPIDs()
+pids, err = proc.ListPIDs()
if err != nil {
return
}
@@ -54,14 +56,9 @@ func GetSS(opts ...FilterFunc) (sss []SSEntry, err error) {
// applyOpts already panic when ft.ProgramMatchFunc != nil && ft.PID > 0
}
-up, err := GetProcUptime()
-if err != nil {
-return nil, err
-}
if len(pids) == 0 {
// find PIDs by Program
-if pids, err = ListPIDs(); err != nil {
+if pids, err = proc.ListPIDs(); err != nil {
return
}
} else {
@@ -73,16 +70,16 @@ func GetSS(opts ...FilterFunc) (sss []SSEntry, err error) {
var wg sync.WaitGroup
limitc := make(chan struct{}, maxConcurrentProcFDLimit)
-f := func(pid int64, ttype TransportProtocol) {
+f := func(pid int64, ttype proc.TransportProtocol) {
defer func() {
<-limitc
wg.Done()
}()
limitc <- struct{}{}
-stat, err := GetProcStatByPID(pid, up)
+stat, err := proc.GetStatByPID(pid)
if err != nil {
-log.Printf("GetProcStatByPID error %v for PID %d", err, pid)
+log.Printf("proc.GetStatByPID error %v for PID %d", err, pid)
return
}
if !ft.ProgramMatchFunc(stat.Comm) {
@@ -113,10 +110,10 @@ func GetSS(opts ...FilterFunc) (sss []SSEntry, err error) {
}
for _, pid := range pids {
if ft.TCP {
-go f(pid, TypeTCP)
+go f(pid, proc.TypeTCP)
}
if ft.TCP6 {
-go f(pid, TypeTCP6)
+go f(pid, proc.TypeTCP6)
}
}
wg.Wait()
@@ -127,12 +124,12 @@ func GetSS(opts ...FilterFunc) (sss []SSEntry, err error) {
return
}
-func getSSEntry(pid int64, tp TransportProtocol, lport int64, rport int64) (sss []SSEntry, err error) {
+func getSSEntry(pid int64, tp proc.TransportProtocol, lport int64, rport int64) (sss []SSEntry, err error) {
-nss, nerr := GetProcNetTCPByPID(pid, tp)
+nss, nerr := proc.GetNetTCPByPID(pid, tp)
if nerr != nil {
return nil, nerr
}
-pname, perr := GetProgram(pid)
+pname, perr := proc.GetProgram(pid)
if perr != nil {
return nil, perr
}

vendor/github.com/gyuho/linux-inspect/pkg/fileutil (generated, vendored, new file, 62 lines)

@@ -0,0 +1,62 @@
// Package fileutil implements file utilities.
package fileutil
import "os"
func OpenToRead(fpath string) (*os.File, error) {
f, err := os.OpenFile(fpath, os.O_RDONLY, 0444)
if err != nil {
return f, err
}
return f, nil
}
func OpenToAppend(fpath string) (*os.File, error) {
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0777)
if err != nil {
return nil, err
}
return f, nil
}
func OpenToOverwrite(fpath string) (*os.File, error) {
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0777)
if err != nil {
return nil, err
}
return f, nil
}
func ToFile(txt, fpath string) error {
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC, 0777)
if err != nil {
f, err = os.Create(fpath)
if err != nil {
return err
}
}
defer f.Close()
if _, err := f.WriteString(txt); err != nil {
return err
}
return nil
}
// Exist returns true if the file or directory exists.
func Exist(fpath string) bool {
st, err := os.Stat(fpath)
if err != nil {
if os.IsNotExist(err) {
return false
}
}
if st.IsDir() {
return true
}
if _, err := os.Stat(fpath); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
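
Review note (illustrative, not part of the vendored file): these helpers replace the old unexported openToRead/openToAppend in 'psn'. A minimal sketch, with a placeholder path:

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/pkg/fileutil"
)

func main() {
	// OpenToAppend creates the file if it does not exist.
	f, err := fileutil.OpenToAppend("/tmp/inspect.csv") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fmt.Fprintln(f, "hello")
	fmt.Println("exists:", fileutil.Exist("/tmp/inspect.csv"))
}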

vendor/github.com/gyuho/linux-inspect/pkg/timeutil (generated, vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
// Package timeutil implements time utilities.
package timeutil
import (
"strings"
"time"
humanize "github.com/dustin/go-humanize"
)
func NowPST() time.Time {
tzone, err := time.LoadLocation("America/Los_Angeles")
if err != nil {
return time.Now()
}
return time.Now().In(tzone)
}
func HumanizeDurationMs(ms uint64) string {
s := humanize.Time(time.Now().Add(-1 * time.Duration(ms) * time.Millisecond))
if s == "now" {
s = "0 seconds"
}
return strings.TrimSpace(strings.Replace(s, " ago", "", -1))
}
func HumanizeDurationSecond(sec uint64) string {
s := humanize.Time(time.Now().Add(-1 * time.Duration(sec) * time.Second))
if s == "now" {
s = "0 seconds"
}
return strings.TrimSpace(strings.Replace(s, " ago", "", -1))
}
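
Review note (illustrative, not part of the vendored file): HumanizeDurationMs/HumanizeDurationSecond are used below by the 'proc' package to render '/proc/diskstats' timings. The exact wording comes from go-humanize; a sketch:

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/pkg/timeutil"
)

func main() {
	fmt.Println(timeutil.HumanizeDurationMs(90061000))  // about one day
	fmt.Println(timeutil.HumanizeDurationSecond(75))    // about one minute
}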

vendor/github.com/gyuho/linux-inspect/proc/diskstats.go (generated, vendored, new file, 144 lines)

@@ -0,0 +1,144 @@
package proc
import (
"bufio"
"fmt"
"strconv"
"strings"
"github.com/gyuho/linux-inspect/pkg/fileutil"
"github.com/gyuho/linux-inspect/pkg/timeutil"
)
type diskstatsColumnIndex int
const (
diskstats_idx_major_number diskstatsColumnIndex = iota
diskstats_idx_minor_number
diskstats_idx_device_name
diskstats_idx_reads_completed
diskstats_idx_reads_merged
diskstats_idx_sectors_read
diskstats_idx_time_spent_on_reading_ms
diskstats_idx_writes_completed
diskstats_idx_writes_merged
diskstats_idx_sectors_written
diskstats_idx_time_spent_on_writing_ms
diskstats_idx_ios_in_progress
diskstats_idx_time_spent_on_ios_ms
diskstats_idx_weighted_time_spent_on_ios_ms
)
// GetDiskstats reads '/proc/diskstats'.
func GetDiskstats() ([]DiskStat, error) {
f, err := fileutil.OpenToRead("/proc/diskstats")
if err != nil {
return nil, err
}
defer f.Close()
dss := []DiskStat{}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
ds := strings.Fields(strings.TrimSpace(txt))
if len(ds) < int(diskstats_idx_weighted_time_spent_on_ios_ms+1) {
return nil, fmt.Errorf("not enough columns at %v", ds)
}
d := DiskStat{}
mn, err := strconv.ParseUint(ds[diskstats_idx_major_number], 10, 64)
if err != nil {
return nil, err
}
d.MajorNumber = mn
mn, err = strconv.ParseUint(ds[diskstats_idx_minor_number], 10, 64)
if err != nil {
return nil, err
}
d.MinorNumber = mn
d.DeviceName = strings.TrimSpace(ds[diskstats_idx_device_name])
mn, err = strconv.ParseUint(ds[diskstats_idx_reads_completed], 10, 64)
if err != nil {
return nil, err
}
d.ReadsCompleted = mn
mn, err = strconv.ParseUint(ds[diskstats_idx_reads_merged], 10, 64)
if err != nil {
return nil, err
}
d.ReadsMerged = mn
mn, err = strconv.ParseUint(ds[diskstats_idx_sectors_read], 10, 64)
if err != nil {
return nil, err
}
d.SectorsRead = mn
mn, err = strconv.ParseUint(ds[diskstats_idx_time_spent_on_reading_ms], 10, 64)
if err != nil {
return nil, err
}
d.TimeSpentOnReadingMs = mn
d.TimeSpentOnReadingMsParsedTime = timeutil.HumanizeDurationMs(mn)
mn, err = strconv.ParseUint(ds[diskstats_idx_writes_completed], 10, 64)
if err != nil {
return nil, err
}
d.WritesCompleted = mn
mn, err = strconv.ParseUint(ds[diskstats_idx_writes_merged], 10, 64)
if err != nil {
return nil, err
}
d.WritesMerged = mn
mn, err = strconv.ParseUint(ds[diskstats_idx_sectors_written], 10, 64)
if err != nil {
return nil, err
}
d.SectorsWritten = mn
mn, err = strconv.ParseUint(ds[diskstats_idx_time_spent_on_writing_ms], 10, 64)
if err != nil {
return nil, err
}
d.TimeSpentOnWritingMs = mn
d.TimeSpentOnWritingMsParsedTime = timeutil.HumanizeDurationMs(mn)
mn, err = strconv.ParseUint(ds[diskstats_idx_ios_in_progress], 10, 64)
if err != nil {
return nil, err
}
d.IOsInProgress = mn
mn, err = strconv.ParseUint(ds[diskstats_idx_time_spent_on_ios_ms], 10, 64)
if err != nil {
return nil, err
}
d.TimeSpentOnIOsMs = mn
d.TimeSpentOnIOsMsParsedTime = timeutil.HumanizeDurationMs(mn)
mn, err = strconv.ParseUint(ds[diskstats_idx_weighted_time_spent_on_ios_ms], 10, 64)
if err != nil {
return nil, err
}
d.WeightedTimeSpentOnIOsMs = mn
d.WeightedTimeSpentOnIOsMsParsedTime = timeutil.HumanizeDurationMs(mn)
dss = append(dss, d)
}
return dss, nil
}
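
Review note (illustrative, not part of the vendored file): a quick sketch of reading '/proc/diskstats' through the function above (Linux only; the device name is a placeholder):

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/proc"
)

func main() {
	dss, err := proc.GetDiskstats()
	if err != nil {
		panic(err)
	}
	for _, d := range dss {
		if d.DeviceName == "sda" { // placeholder device
			fmt.Printf("reads=%d writes=%d (%s writing)\n",
				d.ReadsCompleted, d.WritesCompleted, d.TimeSpentOnWritingMsParsedTime)
		}
	}
}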

vendor/github.com/gyuho/linux-inspect/proc/doc.go (generated, vendored, new file, 2 lines)

@@ -0,0 +1,2 @@
// Package proc represents Linux '/proc'.
package proc


@@ -1,6 +1,6 @@
-package psn
+package proc
-// updated at 2017-03-06 15:17:44.4679204 -0800 PST
+// updated at 2017-05-09 15:51:40.352716363 -0700 PDT
// NetDev is '/proc/net/dev' in Linux.
// The dev pseudo-file contains network device status information.
@@ -78,41 +78,6 @@ type NetTCP struct {
Inode string `column:"inode"`
}
// TopCommandRow represents a row in 'top' command output.
type TopCommandRow struct {
// PID is pid of the process.
PID int64 `column:"pid"`
// USER is user name.
USER string `column:"user"`
// PR is priority.
PR string `column:"pr"`
// NI is nice value of the task.
NI string `column:"ni"`
// VIRT is total amount of virtual memory used by the task (in KiB).
VIRT string `column:"virt"`
VIRTBytesN uint64 `column:"virt_bytes_n"`
VIRTParsedBytes string `column:"virt_parsed_bytes"`
// RES is non-swapped physical memory a task is using (in KiB).
RES string `column:"res"`
RESBytesN uint64 `column:"res_bytes_n"`
RESParsedBytes string `column:"res_parsed_bytes"`
// SHR is amount of shared memory available to a task, not all of which is typically resident (in KiB).
SHR string `column:"shr"`
SHRBytesN uint64 `column:"shr_bytes_n"`
SHRParsedBytes string `column:"shr_parsed_bytes"`
// S is process status.
S string `column:"s"`
SParsedStatus string `column:"s_parsed_status"`
// CPUPercent is %CPU.
CPUPercent float64 `column:"cpupercent"`
// MEMPercent is %MEM.
MEMPercent float64 `column:"mempercent"`
// TIME is CPU time (TIME+).
TIME string `column:"time"`
// COMMAND is command.
COMMAND string `column:"command"`
}
// LoadAvg is '/proc/loadavg' in Linux.
type LoadAvg struct {
// LoadAvg1Minute is total uptime in seconds.
@@ -139,58 +104,6 @@ type Uptime struct {
UptimeIdleParsedTime string `column:"uptime_idle_parsed_time"`
}
// DfCommandRow is 'df' command output row in Linux.
type DfCommandRow struct {
// FileSystem is file system ('source').
FileSystem string `column:"file_system"`
// Device is device name.
Device string `column:"device"`
// MountedOn is 'mounted on' ('target').
MountedOn string `column:"mounted_on"`
// FileSystemType is file system type ('fstype').
FileSystemType string `column:"file_system_type"`
// File is file name if specified on the command line ('file').
File string `column:"file"`
// Inodes is total number of inodes ('itotal').
Inodes int64 `column:"inodes"`
// Ifree is number of available inodes ('iavail').
Ifree int64 `column:"ifree"`
// Iused is number of used inodes ('iused').
Iused int64 `column:"iused"`
// IusedPercent is percentage of iused divided by itotal ('ipcent').
IusedPercent string `column:"iused_percent"`
// TotalBlocks is total number of 1K-blocks ('size').
TotalBlocks int64 `column:"total_blocks"`
TotalBlocksBytesN int64 `column:"total_blocks_bytes_n"`
TotalBlocksParsedBytes string `column:"total_blocks_parsed_bytes"`
// AvailableBlocks is number of available 1K-blocks ('avail').
AvailableBlocks int64 `column:"available_blocks"`
AvailableBlocksBytesN int64 `column:"available_blocks_bytes_n"`
AvailableBlocksParsedBytes string `column:"available_blocks_parsed_bytes"`
// UsedBlocks is number of used 1K-blocks ('used').
UsedBlocks int64 `column:"used_blocks"`
UsedBlocksBytesN int64 `column:"used_blocks_bytes_n"`
UsedBlocksParsedBytes string `column:"used_blocks_parsed_bytes"`
// UsedBlocksPercent is percentage of used-blocks divided by total-blocks ('pcent').
UsedBlocksPercent string `column:"used_blocks_percent"`
}
// Mtab is '/etc/mtab' in Linux.
type Mtab struct {
// FileSystem is file system.
FileSystem string `column:"file_system"`
// MountedOn is 'mounted on'.
MountedOn string `column:"mounted_on"`
// FileSystemType is file system type.
FileSystemType string `column:"file_system_type"`
// Options is file system type.
Options string `column:"options"`
// Dump is number indicating whether and how often the file system should be backed up by the dump program; a zero indicates the file system will never be automatically backed up.
Dump int `column:"dump"`
// Pass is number indicating the order in which the fsck program will check the devices for errors at boot time; this is 1 for the root file system and either 2 (meaning check after root) or 0 (do not check) for all other devices.
Pass int `column:"pass"`
}
// DiskStat is '/proc/diskstats' in Linux.
type DiskStat struct {
// MajorNumber is major device number.


@@ -1,17 +1,19 @@
-package psn
+package proc
import (
"fmt"
"io/ioutil"
+"github.com/gyuho/linux-inspect/pkg/fileutil"
humanize "github.com/dustin/go-humanize"
yaml "gopkg.in/yaml.v2"
)
-// GetProcIOByPID reads '/proc/$PID/io' data.
+// GetIOByPID reads '/proc/$PID/io' data.
-func GetProcIOByPID(pid int64) (s IO, err error) {
+func GetIOByPID(pid int64) (s IO, err error) {
fpath := fmt.Sprintf("/proc/%d/io", pid)
-f, err := openToRead(fpath)
+f, err := fileutil.OpenToRead(fpath)
if err != nil {
return IO{}, err
}

vendor/github.com/gyuho/linux-inspect/proc/list.go (generated, vendored, new file, 44 lines)

@@ -0,0 +1,44 @@
package proc
import (
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
// ListPIDs reads all PIDs in '/proc'.
func ListPIDs() ([]int64, error) {
ds, err := ioutil.ReadDir("/proc")
if err != nil {
return nil, err
}
pids := make([]int64, 0, len(ds))
for _, f := range ds {
if f.IsDir() && isInt(f.Name()) {
id, err := strconv.ParseInt(f.Name(), 10, 64)
if err != nil {
return nil, err
}
pids = append(pids, id)
}
}
return pids, nil
}
// ListFds reads '/proc/*/fd/*' to grab process IDs.
func ListFds() ([]string, error) {
// returns the names of all files matching pattern
// or nil if there is no matching file
fs, err := filepath.Glob("/proc/[0-9]*/fd/[0-9]*")
if err != nil {
return nil, err
}
return fs, nil
}
func pidFromFd(s string) (int64, error) {
// get 5261 from '/proc/5261/fd/69'
return strconv.ParseInt(strings.Split(s, "/")[2], 10, 64)
}
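
Review note (illustrative, not part of the vendored file): ListPIDs/ListFds back the 'inspect' package's process discovery. A minimal sketch:

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/proc"
)

func main() {
	pids, err := proc.ListPIDs() // scans '/proc' for numeric directories
	if err != nil {
		panic(err)
	}
	fds, err := proc.ListFds() // matches '/proc/[0-9]*/fd/[0-9]*'
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d PIDs, %d open fds\n", len(pids), len(fds))
}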


@@ -1,34 +1,36 @@
-package psn
+package proc
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
+"github.com/gyuho/linux-inspect/pkg/fileutil"
)
-type procLoadAvgColumnIndex int
+type loadAvgColumnIndex int
const (
-proc_loadavg_idx_load_avg_1_minute procLoadAvgColumnIndex = iota
+load_avg_idx_load_avg_1_minute loadAvgColumnIndex = iota
-proc_loadavg_idx_load_avg_5_minute
+load_avg_idx_load_avg_5_minute
-proc_loadavg_idx_load_avg_15_minute
+load_avg_idx_load_avg_15_minute
-proc_loadavg_idx_kernel_scheduling_entities_with_slash
+load_avg_idx_kernel_scheduling_entities_with_slash
-proc_loadavg_idx_pid
+load_avg_idx_pid
)
-// GetProcLoadAvg reads '/proc/loadavg'.
+// GetLoadAvg reads '/proc/loadavg'.
// Expected output is '0.37 0.47 0.39 1/839 31397'.
-func GetProcLoadAvg() (LoadAvg, error) {
+func GetLoadAvg() (LoadAvg, error) {
-txt, err := readProcLoadAvg()
+txt, err := readLoadAvg()
if err != nil {
return LoadAvg{}, err
}
-return getProcLoadAvg(txt)
+return getLoadAvg(txt)
}
-func readProcLoadAvg() (string, error) {
+func readLoadAvg() (string, error) {
-f, err := openToRead("/proc/loadavg")
+f, err := fileutil.OpenToRead("/proc/loadavg")
if err != nil {
return "", err
}
@@ -41,7 +43,7 @@ func readProcLoadAvg() (string, error) {
return strings.TrimSpace(string(bts)), nil
}
-func getProcLoadAvg(txt string) (LoadAvg, error) {
+func getLoadAvg(txt string) (LoadAvg, error) {
ds := strings.Fields(txt)
if len(ds) < 5 {
return LoadAvg{}, fmt.Errorf("not enough columns at %v", ds)
@@ -49,25 +51,25 @@ func getProcLoadAvg(txt string) (LoadAvg, error) {
lavg := LoadAvg{}
-avg1, err := strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_1_minute], 64)
+avg1, err := strconv.ParseFloat(ds[load_avg_idx_load_avg_1_minute], 64)
if err != nil {
return LoadAvg{}, err
}
lavg.LoadAvg1Minute = avg1
-avg5, err := strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_5_minute], 64)
+avg5, err := strconv.ParseFloat(ds[load_avg_idx_load_avg_5_minute], 64)
if err != nil {
return LoadAvg{}, err
}
lavg.LoadAvg5Minute = avg5
-avg15, err := strconv.ParseFloat(ds[proc_loadavg_idx_load_avg_15_minute], 64)
+avg15, err := strconv.ParseFloat(ds[load_avg_idx_load_avg_15_minute], 64)
if err != nil {
return LoadAvg{}, err
}
lavg.LoadAvg15Minute = avg15
-slashed := strings.Split(ds[proc_loadavg_idx_kernel_scheduling_entities_with_slash], "/")
+slashed := strings.Split(ds[load_avg_idx_kernel_scheduling_entities_with_slash], "/")
if len(slashed) != 2 {
return LoadAvg{}, fmt.Errorf("expected '/' string in kernel scheduling entities field, got %v", slashed)
}
@@ -83,7 +85,7 @@ func getProcLoadAvg(txt string) (LoadAvg, error) {
}
lavg.CurrentKernelSchedulingEntities = s2
-pid, err := strconv.ParseInt(ds[proc_loadavg_idx_pid], 10, 64)
+pid, err := strconv.ParseInt(ds[load_avg_idx_pid], 10, 64)
if err != nil {
return LoadAvg{}, err
}

vendor/github.com/gyuho/linux-inspect/proc/net_dev.go (generated, vendored, new file, 185 lines)

@@ -0,0 +1,185 @@
package proc
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"strconv"
"strings"
"github.com/gyuho/linux-inspect/pkg/fileutil"
humanize "github.com/dustin/go-humanize"
)
type netDevColumnIndex int
const (
net_dev_idx_interface netDevColumnIndex = iota
net_dev_idx_receive_bytes
net_dev_idx_receive_packets
net_dev_idx_receive_errs
net_dev_idx_receive_drop
net_dev_idx_receive_fifo
net_dev_idx_receive_frame
net_dev_idx_receive_compressed
net_dev_idx_receive_multicast
net_dev_idx_transmit_bytes
net_dev_idx_transmit_packets
net_dev_idx_transmit_errs
net_dev_idx_transmit_drop
net_dev_idx_transmit_fifo
net_dev_idx_transmit_colls
net_dev_idx_transmit_carrier
)
// GetNetDev reads '/proc/net/dev'.
func GetNetDev() (nds []NetDev, err error) {
var d []byte
d, err = readNetDev()
if err != nil {
return nil, err
}
header := true
scanner := bufio.NewScanner(bytes.NewReader(d))
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
ds := strings.Fields(strings.TrimSpace(txt))
if header {
if strings.HasPrefix(ds[0], "Inter") {
continue
}
if strings.HasSuffix(ds[0], "face") {
header = false
continue
}
}
if len(ds) < int(net_dev_idx_transmit_carrier+1) {
return nil, fmt.Errorf("not enough columns at %v", ds)
}
d := NetDev{}
d.Interface = strings.TrimSpace(ds[net_dev_idx_interface])
d.Interface = d.Interface[:len(d.Interface)-1] // remove ':' from 'wlp2s0:'
mn, err := strconv.ParseUint(ds[net_dev_idx_receive_bytes], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveBytes = mn
d.ReceiveBytesBytesN = mn
d.ReceiveBytesParsedBytes = humanize.Bytes(mn)
mn, err = strconv.ParseUint(ds[net_dev_idx_transmit_bytes], 10, 64)
if err != nil {
return nil, err
}
d.TransmitBytes = mn
d.TransmitBytesBytesN = mn
d.TransmitBytesParsedBytes = humanize.Bytes(mn)
mn, err = strconv.ParseUint(ds[net_dev_idx_receive_packets], 10, 64)
if err != nil {
return nil, err
}
d.ReceivePackets = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_receive_errs], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveErrs = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_receive_drop], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveDrop = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_receive_fifo], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveFifo = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_receive_frame], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveFrame = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_receive_compressed], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveCompressed = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_receive_multicast], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveMulticast = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_transmit_packets], 10, 64)
if err != nil {
return nil, err
}
d.TransmitPackets = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_transmit_errs], 10, 64)
if err != nil {
return nil, err
}
d.TransmitErrs = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_transmit_drop], 10, 64)
if err != nil {
return nil, err
}
d.TransmitDrop = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_transmit_fifo], 10, 64)
if err != nil {
return nil, err
}
d.TransmitFifo = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_transmit_colls], 10, 64)
if err != nil {
return nil, err
}
d.TransmitColls = mn
mn, err = strconv.ParseUint(ds[net_dev_idx_transmit_carrier], 10, 64)
if err != nil {
return nil, err
}
d.TransmitCarrier = mn
nds = append(nds, d)
}
return nds, nil
}
func readNetDev() ([]byte, error) {
f, err := fileutil.OpenToRead("/proc/net/dev")
if err != nil {
return nil, err
}
err = f.Close()
b, berr := ioutil.ReadAll(f)
if err != nil {
berr = fmt.Errorf("%v; %v", err, berr)
}
return b, berr
}

vendor/github.com/gyuho/linux-inspect/proc/net_tcp.go (generated, vendored, new file, 213 lines)

@@ -0,0 +1,213 @@
package proc
import (
"bufio"
"fmt"
"io/ioutil"
"strconv"
"strings"
"github.com/gyuho/linux-inspect/pkg/fileutil"
"bytes"
)
// GetNetTCPByPID reads '/proc/$PID/net/tcp(6)' data.
func GetNetTCPByPID(pid int64, tp TransportProtocol) ([]NetTCP, error) {
d, err := readNetTCP(pid, tp)
if err != nil {
return nil, err
}
var ipParse func(string) (string, int64, error)
switch tp {
case TypeTCP:
ipParse = parseLittleEndianIpv4
case TypeTCP6:
ipParse = parseLittleEndianIpv6
}
return parseNetTCP(d, ipParse, tp.String())
}
// TransportProtocol is tcp, tcp6.
type TransportProtocol int
const (
TypeTCP TransportProtocol = iota
TypeTCP6
)
func (tp TransportProtocol) String() string {
switch tp {
case TypeTCP:
return "tcp"
case TypeTCP6:
return "tcp6"
default:
panic(fmt.Errorf("unknown transport protocol %d", tp))
}
}
type netColumnIndex int
const (
net_tcp_idx_sl netColumnIndex = iota
net_tcp_idx_local_address
net_tcp_idx_remote_address
net_tcp_idx_st
net_tcp_idx_tx_queue_rx_queue
net_tcp_idx_tr_tm_when
net_tcp_idx_retrnsmt
net_tcp_idx_uid
net_tcp_idx_timeout
net_tcp_idx_inode
)
var (
// RPC_SHOW_SOCK
// https://github.com/torvalds/linux/blob/master/include/trace/events/sunrpc.h
netTCPStatus = map[string]string{
"01": "ESTABLISHED",
"02": "SYN_SENT",
"03": "SYN_RECV",
"04": "FIN_WAIT1",
"05": "FIN_WAIT2",
"06": "TIME_WAIT",
"07": "CLOSE",
"08": "CLOSE_WAIT",
"09": "LAST_ACK",
"0A": "LISTEN",
"0B": "CLOSING",
}
)
func parseNetTCP(d []byte, ipParse func(string) (string, int64, error), ipType string) ([]NetTCP, error) {
rows := [][]string{}
first := true
scanner := bufio.NewScanner(bytes.NewReader(d))
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
fs := strings.Fields(txt)
if len(fs) < int(net_tcp_idx_inode+1) {
return nil, fmt.Errorf("not enough columns at %v", fs)
}
if first {
if fs[0] != "sl" { // header
return nil, fmt.Errorf("first line must be columns but got = %#q", fs)
}
first = false
continue
}
row := make([]string, 10)
copy(row, fs[:net_tcp_idx_inode+1])
rows = append(rows, row)
}
if err := scanner.Err(); err != nil {
return nil, err
}
nch, errc := make(chan NetTCP), make(chan error)
for _, row := range rows {
go func(row []string) {
np := NetTCP{}
np.Type = ipType
sn, err := strconv.ParseUint(strings.Replace(row[net_tcp_idx_sl], ":", "", -1), 10, 64)
if err != nil {
errc <- err
return
}
np.Sl = sn
np.LocalAddress = strings.TrimSpace(row[net_tcp_idx_local_address])
lp, lt, err := ipParse(row[net_tcp_idx_local_address])
if err != nil {
errc <- err
return
}
np.LocalAddressParsedIPHost = strings.TrimSpace(lp)
np.LocalAddressParsedIPPort = lt
np.RemAddress = strings.TrimSpace(row[net_tcp_idx_remote_address])
rp, rt, err := ipParse(row[net_tcp_idx_remote_address])
if err != nil {
errc <- err
return
}
np.RemAddressParsedIPHost = strings.TrimSpace(rp)
np.RemAddressParsedIPPort = rt
np.St = strings.TrimSpace(row[net_tcp_idx_st])
np.StParsedStatus = strings.TrimSpace(netTCPStatus[row[net_tcp_idx_st]])
qs := strings.Split(row[net_tcp_idx_tx_queue_rx_queue], ":")
if len(qs) == 2 {
np.TxQueue = qs[0]
np.RxQueue = qs[1]
}
trs := strings.Split(row[net_tcp_idx_tr_tm_when], ":")
if len(trs) == 2 {
np.Tr = trs[0]
np.TmWhen = trs[1]
}
np.Retrnsmt = row[net_tcp_idx_retrnsmt]
un, err := strconv.ParseUint(row[net_tcp_idx_uid], 10, 64)
if err != nil {
errc <- err
return
}
np.Uid = un
to, err := strconv.ParseUint(row[net_tcp_idx_timeout], 10, 64)
if err != nil {
errc <- err
return
}
np.Timeout = to
np.Inode = strings.TrimSpace(row[net_tcp_idx_inode])
nch <- np
}(row)
}
nss := make([]NetTCP, 0, len(rows))
cn, limit := 0, len(rows)
for cn != limit {
select {
case err := <-errc:
return nil, err
case p := <-nch:
nss = append(nss, p)
cn++
}
}
close(nch)
close(errc)
return nss, nil
}
func readNetTCP(pid int64, tp TransportProtocol) ([]byte, error) {
fpath := fmt.Sprintf("/proc/%d/net/%s", pid, tp.String())
f, err := fileutil.OpenToRead(fpath)
if err != nil {
return nil, err
}
// close only after the contents have been read
defer f.Close()
return ioutil.ReadAll(f)
}
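
A minimal usage sketch (not part of the vendored code) for GetNetTCPByPID as defined above. Addresses in '/proc/net/tcp' are hex-encoded little-endian (for example, '0100007F:0050' is 127.0.0.1:80); the parser exposes the decoded values through the *ParsedIPHost and *ParsedIPPort fields:

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/gyuho/linux-inspect/proc"
	)

	func main() {
		pid := int64(os.Getpid())
		// list the IPv4 TCP sockets visible to this process
		conns, err := proc.GetNetTCPByPID(pid, proc.TypeTCP)
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range conns {
			fmt.Printf("%s:%d -> %s:%d %s\n",
				c.LocalAddressParsedIPHost, c.LocalAddressParsedIPPort,
				c.RemAddressParsedIPHost, c.RemAddressParsedIPPort,
				c.StParsedStatus)
		}
	}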

View File

@ -1,4 +1,4 @@
-package psn
+package proc

 import (
 	"fmt"

299
vendor/github.com/gyuho/linux-inspect/proc/schema.go generated vendored Normal file
View File

@ -0,0 +1,299 @@
package proc
import (
"reflect"
"github.com/gyuho/linux-inspect/schema"
)
// NetDevSchema represents '/proc/net/dev'.
// Reference http://man7.org/linux/man-pages/man5/proc.5.html
// and http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html.
var NetDevSchema = schema.RawData{
IsYAML: false,
Columns: []schema.Column{
{Name: "interface", Godoc: "network interface", Kind: reflect.String},
{Name: "receive_bytes", Godoc: "total number of bytes of data received by the interface", Kind: reflect.Uint64},
{Name: "receive_packets", Godoc: "total number of packets of data received by the interface", Kind: reflect.Uint64},
{Name: "receive_errs", Godoc: "total number of receive errors detected by the device driver", Kind: reflect.Uint64},
{Name: "receive_drop", Godoc: "total number of packets dropped by the device driver", Kind: reflect.Uint64},
{Name: "receive_fifo", Godoc: "number of FIFO buffer errors", Kind: reflect.Uint64},
{Name: "receive_frame", Godoc: "number of packet framing errors", Kind: reflect.Uint64},
{Name: "receive_compressed", Godoc: "number of compressed packets received by the device driver", Kind: reflect.Uint64},
{Name: "receive_multicast", Godoc: "number of multicast frames received by the device driver", Kind: reflect.Uint64},
{Name: "transmit_bytes", Godoc: "total number of bytes of data transmitted by the interface", Kind: reflect.Uint64},
{Name: "transmit_packets", Godoc: "total number of packets of data transmitted by the interface", Kind: reflect.Uint64},
{Name: "transmit_errs", Godoc: "total number of receive errors detected by the device driver", Kind: reflect.Uint64},
{Name: "transmit_drop", Godoc: "total number of packets dropped by the device driver", Kind: reflect.Uint64},
{Name: "transmit_fifo", Godoc: "number of FIFO buffer errors", Kind: reflect.Uint64},
{Name: "transmit_colls", Godoc: "number of collisions detected on the interface", Kind: reflect.Uint64},
{Name: "transmit_carrier", Godoc: "number of carrier losses detected by the device driver", Kind: reflect.Uint64},
},
ColumnsToParse: map[string]schema.RawDataType{
"receive_bytes": schema.TypeBytes,
"transmit_bytes": schema.TypeBytes,
},
}
// NetTCPSchema represents '/proc/net/tcp' and '/proc/net/tcp6'.
// Reference http://man7.org/linux/man-pages/man5/proc.5.html
// and http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html.
var NetTCPSchema = schema.RawData{
IsYAML: false,
Columns: []schema.Column{
{Name: "sl", Godoc: "kernel hash slot", Kind: reflect.Uint64},
{Name: "local_address", Godoc: "local-address:port", Kind: reflect.String},
{Name: "rem_address", Godoc: "remote-address:port", Kind: reflect.String},
{Name: "st", Godoc: "internal status of socket", Kind: reflect.String},
{Name: "tx_queue", Godoc: "outgoing data queue in terms of kernel memory usage", Kind: reflect.String},
{Name: "rx_queue", Godoc: "incoming data queue in terms of kernel memory usage", Kind: reflect.String},
{Name: "tr", Godoc: "internal information of the kernel socket state", Kind: reflect.String},
{Name: "tm->when", Godoc: "internal information of the kernel socket state", Kind: reflect.String},
{Name: "retrnsmt", Godoc: "internal information of the kernel socket state", Kind: reflect.String},
{Name: "uid", Godoc: "effective UID of the creator of the socket", Kind: reflect.Uint64},
{Name: "timeout", Godoc: "timeout", Kind: reflect.Uint64},
{Name: "inode", Godoc: "inode raw data", Kind: reflect.String},
},
ColumnsToParse: map[string]schema.RawDataType{
"local_address": schema.TypeIPAddress,
"rem_address": schema.TypeIPAddress,
"st": schema.TypeStatus,
},
}
// LoadAvgSchema represents '/proc/loadavg'.
// Reference http://man7.org/linux/man-pages/man5/proc.5.html.
var LoadAvgSchema = schema.RawData{
IsYAML: false,
Columns: []schema.Column{
{Name: "load-avg-1-minute", Godoc: "total uptime in seconds", Kind: reflect.Float64},
{Name: "load-avg-5-minute", Godoc: "total uptime in seconds", Kind: reflect.Float64},
{Name: "load-avg-15-minute", Godoc: "total uptime in seconds", Kind: reflect.Float64},
{Name: "runnable-kernel-scheduling-entities", Godoc: "number of currently runnable kernel scheduling entities (processes, threads)", Kind: reflect.Int64},
{Name: "current-kernel-scheduling-entities", Godoc: "number of kernel scheduling entities that currently exist on the system", Kind: reflect.Int64},
{Name: "pid", Godoc: "PID of the process that was most recently created on the system", Kind: reflect.Int64},
},
ColumnsToParse: map[string]schema.RawDataType{},
}
// UptimeSchema represents '/proc/uptime'.
// Reference http://man7.org/linux/man-pages/man5/proc.5.html.
var UptimeSchema = schema.RawData{
IsYAML: false,
Columns: []schema.Column{
{Name: "uptime-total", Godoc: "total uptime in seconds", Kind: reflect.Float64},
{Name: "uptime-idle", Godoc: "total amount of time in seconds spent in idle process", Kind: reflect.Float64},
},
ColumnsToParse: map[string]schema.RawDataType{
"uptime-total": schema.TypeTimeSeconds,
"uptime-idle": schema.TypeTimeSeconds,
},
}
// DiskStatSchema represents '/proc/diskstats'.
// Reference https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
// and https://www.kernel.org/doc/Documentation/iostats.txt.
var DiskStatSchema = schema.RawData{
IsYAML: false,
Columns: []schema.Column{
{Name: "major-number", Godoc: "major device number", Kind: reflect.Uint64},
{Name: "minor-number", Godoc: "minor device number", Kind: reflect.Uint64},
{Name: "device-name", Godoc: "device name", Kind: reflect.String},
{Name: "reads-completed", Godoc: "total number of reads completed successfully", Kind: reflect.Uint64},
{Name: "reads-merged", Godoc: "total number of reads merged when adjacent to each other", Kind: reflect.Uint64},
{Name: "sectors-read", Godoc: "total number of sectors read successfully", Kind: reflect.Uint64},
{Name: "time-spent-on-reading-ms", Godoc: "total number of milliseconds spent by all reads", Kind: reflect.Uint64},
{Name: "writes-completed", Godoc: "total number of writes completed successfully", Kind: reflect.Uint64},
{Name: "writes-merged", Godoc: "total number of writes merged when adjacent to each other", Kind: reflect.Uint64},
{Name: "sectors-written", Godoc: "total number of sectors written successfully", Kind: reflect.Uint64},
{Name: "time-spent-on-writing-ms", Godoc: "total number of milliseconds spent by all writes", Kind: reflect.Uint64},
{Name: "I/Os-in-progress", Godoc: "only field that should go to zero (incremented as requests are on request_queue)", Kind: reflect.Uint64},
{Name: "time-spent-on-I/Os-ms", Godoc: "milliseconds spent doing I/Os", Kind: reflect.Uint64},
{Name: "weighted-time-spent-on-I/Os-ms", Godoc: "weighted milliseconds spent doing I/Os (incremented at each I/O start, I/O completion, I/O merge)", Kind: reflect.Uint64},
},
ColumnsToParse: map[string]schema.RawDataType{
"time-spent-on-reading-ms": schema.TypeTimeMicroseconds,
"time-spent-on-writing-ms": schema.TypeTimeMicroseconds,
"time-spent-on-I/Os-ms": schema.TypeTimeMicroseconds,
"weighted-time-spent-on-I/Os-ms": schema.TypeTimeMicroseconds,
},
}
// IOSchema represents 'proc/$PID/io'.
// Reference http://man7.org/linux/man-pages/man5/proc.5.html.
var IOSchema = schema.RawData{
IsYAML: true,
Columns: []schema.Column{
{Name: "rchar", Godoc: "number of bytes which this task has caused to be read from storage (sum of bytes which this process passed to read)", Kind: reflect.Uint64},
{Name: "wchar", Godoc: "number of bytes which this task has caused, or shall cause to be written to disk", Kind: reflect.Uint64},
{Name: "syscr", Godoc: "number of read I/O operations", Kind: reflect.Uint64},
{Name: "syscw", Godoc: "number of write I/O operations", Kind: reflect.Uint64},
{Name: "read_bytes", Godoc: "number of bytes which this process really did cause to be fetched from the storage layer", Kind: reflect.Uint64},
{Name: "write_bytes", Godoc: "number of bytes which this process caused to be sent to the storage layer", Kind: reflect.Uint64},
{Name: "cancelled_write_bytes", Godoc: "number of bytes which this process caused to not happen by truncating pagecache", Kind: reflect.Uint64},
},
ColumnsToParse: map[string]schema.RawDataType{
"rchar": schema.TypeBytes,
"wchar": schema.TypeBytes,
"read_bytes": schema.TypeBytes,
"write_bytes": schema.TypeBytes,
"cancelled_write_bytes": schema.TypeBytes,
},
}
// StatSchema represents '/proc/$PID/stat'.
// Reference http://man7.org/linux/man-pages/man5/proc.5.html.
var StatSchema = schema.RawData{
IsYAML: false,
Columns: []schema.Column{
{Name: "pid", Godoc: "process ID", Kind: reflect.Int64},
{Name: "comm", Godoc: "filename of the executable (originally in parentheses, automatically removed by this package)", Kind: reflect.String},
{Name: "state", Godoc: "one character that represents the state of the process", Kind: reflect.String},
{Name: "ppid", Godoc: "PID of the parent process", Kind: reflect.Int64},
{Name: "pgrp", Godoc: "group ID of the process", Kind: reflect.Int64},
{Name: "session", Godoc: "session ID of the process", Kind: reflect.Int64},
{Name: "tty_nr", Godoc: "controlling terminal of the process", Kind: reflect.Int64},
{Name: "tpgid", Godoc: "ID of the foreground process group of the controlling terminal of the process", Kind: reflect.Int64},
{Name: "flags", Godoc: "kernel flags word of the process", Kind: reflect.Int64},
{Name: "minflt", Godoc: "number of minor faults the process has made which have not required loading a memory page from disk", Kind: reflect.Uint64},
{Name: "cminflt", Godoc: "number of minor faults that the process's waited-for children have made", Kind: reflect.Uint64},
{Name: "majflt", Godoc: "number of major faults the process has made which have required loading a memory page from disk", Kind: reflect.Uint64},
{Name: "cmajflt", Godoc: "number of major faults that the process's waited-for children have made", Kind: reflect.Uint64},
{Name: "utime", Godoc: "number of clock ticks that this process has been scheduled in user mode (includes guest_time)", Kind: reflect.Uint64},
{Name: "stime", Godoc: "number of clock ticks that this process has been scheduled in kernel mode", Kind: reflect.Uint64},
{Name: "cutime", Godoc: "number of clock ticks that this process's waited-for children have been scheduled in user mode", Kind: reflect.Uint64},
{Name: "cstime", Godoc: "number of clock ticks that this process's waited-for children have been scheduled in kernel mode", Kind: reflect.Uint64},
{Name: "priority", Godoc: "for processes running a real-time scheduling policy, the negated scheduling priority, minus one; that is, a number in the range -2 to -100, corresponding to real-time priorities 1 to 99. For processes running under a non-real-time scheduling policy, this is the raw nice value. The kernel stores nice values as numbers in the range 0 (high) to 39 (low)", Kind: reflect.Int64},
{Name: "nice", Godoc: "nice value, a value in the range 19 (low priority) to -20 (high priority)", Kind: reflect.Int64},
{Name: "num_threads", Godoc: "number of threads in this process", Kind: reflect.Int64},
{Name: "itrealvalue", Godoc: "no longer maintained", Kind: reflect.Int64},
{Name: "starttime", Godoc: "time(number of clock ticks) the process started after system boot", Kind: reflect.Uint64},
{Name: "vsize", Godoc: "virtual memory size in bytes", Kind: reflect.Uint64},
{Name: "rss", Godoc: "resident set size: number of pages the process has in real memory (text, data, or stack space but does not include pages which have not been demand-loaded in, or which are swapped out)", Kind: reflect.Int64},
{Name: "rsslim", Godoc: "current soft limit in bytes on the rss of the process", Kind: reflect.Uint64},
{Name: "startcode", Godoc: "address above which program text can run", Kind: reflect.Uint64},
{Name: "endcode", Godoc: "address below which program text can run", Kind: reflect.Uint64},
{Name: "startstack", Godoc: "address of the start (i.e., bottom) of the stack", Kind: reflect.Uint64},
{Name: "kstkesp", Godoc: "current value of ESP (stack pointer), as found in the kernel stack page for the process", Kind: reflect.Uint64},
{Name: "kstkeip", Godoc: "current EIP (instruction pointer)", Kind: reflect.Uint64},
{Name: "signal", Godoc: "obsolete, because it does not provide information on real-time signals (use /proc/$PID/status)", Kind: reflect.Uint64},
{Name: "blocked", Godoc: "obsolete, because it does not provide information on real-time signals (use /proc/$PID/status)", Kind: reflect.Uint64},
{Name: "sigignore", Godoc: "obsolete, because it does not provide information on real-time signals (use /proc/$PID/status)", Kind: reflect.Uint64},
{Name: "sigcatch", Godoc: "obsolete, because it does not provide information on real-time signals (use /proc/$PID/status)", Kind: reflect.Uint64},
{Name: "wchan", Godoc: "channel in which the process is waiting (address of a location in the kernel where the process is sleeping)", Kind: reflect.Uint64},
{Name: "nswap", Godoc: "not maintained (number of pages swapped)", Kind: reflect.Uint64},
{Name: "cnswap", Godoc: "not maintained (cumulative nswap for child processes)", Kind: reflect.Uint64},
{Name: "exit_signal", Godoc: "signal to be sent to parent when we die", Kind: reflect.Int64},
{Name: "processor", Godoc: "CPU number last executed on", Kind: reflect.Int64},
{Name: "rt_priority", Godoc: "real-time scheduling priority, a number in the range 1 to 99 for processes scheduled under a real-time policy, or 0, for non-real-time processes", Kind: reflect.Uint64},
{Name: "policy", Godoc: "scheduling policy", Kind: reflect.Uint64},
{Name: "delayacct_blkio_ticks", Godoc: "aggregated block I/O delays, measured in clock ticks", Kind: reflect.Uint64},
{Name: "guest_time", Godoc: "number of clock ticks spent running a virtual CPU for a guest operating system", Kind: reflect.Uint64},
{Name: "cguest_time", Godoc: "number of clock ticks (guest_time of the process's children)", Kind: reflect.Uint64},
{Name: "start_data", Godoc: "address above which program initialized and uninitialized (BSS) data are placed", Kind: reflect.Uint64},
{Name: "end_data", Godoc: "address below which program initialized and uninitialized (BSS) data are placed", Kind: reflect.Uint64},
{Name: "start_brk", Godoc: "address above which program heap can be expanded with brk", Kind: reflect.Uint64},
{Name: "arg_start", Godoc: "address above which program command-line arguments are placed", Kind: reflect.Uint64},
{Name: "arg_end", Godoc: "address below program command-line arguments are placed", Kind: reflect.Uint64},
{Name: "env_start", Godoc: "address above which program environment is placed", Kind: reflect.Uint64},
{Name: "env_end", Godoc: "address below which program environment is placed", Kind: reflect.Uint64},
{Name: "exit_code", Godoc: "thread's exit status in the form reported by waitpid(2)", Kind: reflect.Int64},
},
ColumnsToParse: map[string]schema.RawDataType{
"state": schema.TypeStatus,
"vsize": schema.TypeBytes,
"rss": schema.TypeBytes,
"rsslim": schema.TypeBytes,
},
}
// StatusSchema represents 'proc/$PID/status'.
// Reference http://man7.org/linux/man-pages/man5/proc.5.html.
var StatusSchema = schema.RawData{
IsYAML: true,
Columns: []schema.Column{
{Name: "Name", Godoc: "command run by this process", Kind: reflect.String},
{Name: "Umask", Godoc: "process umask, expressed in octal with a leading", Kind: reflect.String},
{Name: "State", Godoc: "current state of the process: R (running), S (sleeping), D (disk sleep), T (stopped), T (tracing stop), Z (zombie), or X (dead)", Kind: reflect.String},
{Name: "Tgid", Godoc: "thread group ID", Kind: reflect.Int64},
{Name: "Ngid", Godoc: "NUMA group ID", Kind: reflect.Int64},
{Name: "Pid", Godoc: "process ID", Kind: reflect.Int64},
{Name: "PPid", Godoc: "parent process ID, which launches the Pid", Kind: reflect.Int64},
{Name: "TracerPid", Godoc: "PID of process tracing this process (0 if not being traced)", Kind: reflect.Int64},
{Name: "Uid", Godoc: "real, effective, saved set, and filesystem UIDs", Kind: reflect.String},
{Name: "Gid", Godoc: "real, effective, saved set, and filesystem UIDs", Kind: reflect.String},
{Name: "FDSize", Godoc: "number of file descriptor slots currently allocated", Kind: reflect.Uint64},
{Name: "Groups", Godoc: "supplementary group list", Kind: reflect.String},
{Name: "NStgid", Godoc: "thread group ID (i.e., PID) in each of the PID namespaces of which [pid] is a member", Kind: reflect.String},
{Name: "NSpid", Godoc: "thread ID (i.e., PID) in each of the PID namespaces of which [pid] is a member", Kind: reflect.String},
{Name: "NSpgid", Godoc: "process group ID (i.e., PID) in each of the PID namespaces of which [pid] is a member", Kind: reflect.String},
{Name: "NSsid", Godoc: "descendant namespace session ID hierarchy Session ID in each of the PID namespaces of which [pid] is a member", Kind: reflect.String},
{Name: "VmPeak", Godoc: "peak virtual memory usage. Vm includes physical memory and swap", Kind: reflect.String},
{Name: "VmSize", Godoc: "current virtual memory usage. VmSize is the total amount of memory required for this process", Kind: reflect.String},
{Name: "VmLck", Godoc: "locked memory size", Kind: reflect.String},
{Name: "VmPin", Godoc: "pinned memory size (pages can't be moved, requires direct-access to physical memory)", Kind: reflect.String},
{Name: "VmHWM", Godoc: `peak resident set size ("high water mark")`, Kind: reflect.String},
{Name: "VmRSS", Godoc: "resident set size. VmRSS is the actual amount in memory. Some memory can be swapped out to physical disk. So this is the real memory usage of the process", Kind: reflect.String},
{Name: "VmData", Godoc: "size of data segment", Kind: reflect.String},
{Name: "VmStk", Godoc: "size of stack", Kind: reflect.String},
{Name: "VmExe", Godoc: "size of text segments", Kind: reflect.String},
{Name: "VmLib", Godoc: "shared library code size", Kind: reflect.String},
{Name: "VmPTE", Godoc: "page table entries size", Kind: reflect.String},
{Name: "VmPMD", Godoc: "size of second-level page tables", Kind: reflect.String},
{Name: "VmSwap", Godoc: "swapped-out virtual memory size by anonymous private", Kind: reflect.String},
{Name: "HugetlbPages", Godoc: "size of hugetlb memory portions", Kind: reflect.String},
{Name: "Threads", Godoc: "number of threads in process containing this thread (process)", Kind: reflect.Uint64},
{Name: "SigQ", Godoc: "queued signals for the real user ID of this process (queued signals / limits)", Kind: reflect.String},
{Name: "SigPnd", Godoc: "number of signals pending for thread", Kind: reflect.String},
{Name: "ShdPnd", Godoc: "number of signals pending for process as a whole", Kind: reflect.String},
{Name: "SigBlk", Godoc: "masks indicating signals being blocked", Kind: reflect.String},
{Name: "SigIgn", Godoc: "masks indicating signals being ignored", Kind: reflect.String},
{Name: "SigCgt", Godoc: "masks indicating signals being caught", Kind: reflect.String},
{Name: "CapInh", Godoc: "masks of capabilities enabled in inheritable sets", Kind: reflect.String},
{Name: "CapPrm", Godoc: "masks of capabilities enabled in permitted sets", Kind: reflect.String},
{Name: "CapEff", Godoc: "masks of capabilities enabled in effective sets", Kind: reflect.String},
{Name: "CapBnd", Godoc: "capability Bounding set", Kind: reflect.String},
{Name: "CapAmb", Godoc: "ambient capability set", Kind: reflect.String},
{Name: "Seccomp", Godoc: "seccomp mode of the process (0 means SECCOMP_MODE_DISABLED; 1 means SECCOMP_MODE_STRICT; 2 means SECCOMP_MODE_FILTER)", Kind: reflect.Uint64},
{Name: "Cpus_allowed", Godoc: "mask of CPUs on which this process may run", Kind: reflect.String},
{Name: "Cpus_allowed_list", Godoc: "list of CPUs on which this process may run", Kind: reflect.String},
{Name: "Mems_allowed", Godoc: "mask of memory nodes allowed to this process", Kind: reflect.String},
{Name: "Mems_allowed_list", Godoc: "list of memory nodes allowed to this process", Kind: reflect.String},
{Name: "voluntary_ctxt_switches", Godoc: "number of voluntary context switches", Kind: reflect.Uint64},
{Name: "nonvoluntary_ctxt_switches", Godoc: "number of involuntary context switches", Kind: reflect.Uint64},
},
ColumnsToParse: map[string]schema.RawDataType{
"State": schema.TypeStatus,
"VmPeak": schema.TypeBytes,
"VmSize": schema.TypeBytes,
"VmLck": schema.TypeBytes,
"VmPin": schema.TypeBytes,
"VmHWM": schema.TypeBytes,
"VmRSS": schema.TypeBytes,
"VmData": schema.TypeBytes,
"VmStk": schema.TypeBytes,
"VmExe": schema.TypeBytes,
"VmLib": schema.TypeBytes,
"VmPTE": schema.TypeBytes,
"VmPMD": schema.TypeBytes,
"VmSwap": schema.TypeBytes,
"HugetlbPages": schema.TypeBytes,
},
}
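
The schemas above drive the generated struct fields: each column becomes a field named after the column, and a ColumnsToParse entry adds derived companions (for example, schema.TypeBytes adds a raw byte count plus a humanized string, which is exactly what the /proc/net/dev parser above populates). An illustrative fragment only, assuming the usual underscore-to-CamelCase field mangling; the real NetDev type is generated from NetDevSchema:

	type NetDev struct {
		Interface string // column "interface"

		// column "receive_bytes" is listed in ColumnsToParse as schema.TypeBytes,
		// so the raw counter gets two derived companion fields
		ReceiveBytes            uint64 // raw counter from /proc/net/dev
		ReceiveBytesBytesN      uint64 // same value, kept for byte-oriented aggregation
		ReceiveBytesParsedBytes string // humanized form, e.g. "1.2 MB"
	}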

225
vendor/github.com/gyuho/linux-inspect/proc/stat.go generated vendored Normal file
View File

@ -0,0 +1,225 @@
package proc
import (
"bufio"
"bytes"
"fmt"
"html/template"
"io/ioutil"
"log"
"reflect"
"strconv"
"strings"
"github.com/gyuho/linux-inspect/pkg/fileutil"
"github.com/gyuho/linux-inspect/schema"
"github.com/dustin/go-humanize"
)
// GetStatByPID reads '/proc/$PID/stat' data.
func GetStatByPID(pid int64) (s Stat, err error) {
var d []byte
d, err = readStat(pid)
if err != nil {
return Stat{}, err
}
return parseStat(d)
}
func readStat(pid int64) ([]byte, error) {
fpath := fmt.Sprintf("/proc/%d/stat", pid)
f, err := fileutil.OpenToRead(fpath)
if err != nil {
return nil, err
}
// close only after the contents have been read
defer f.Close()
return ioutil.ReadAll(f)
}
func parseStat(d []byte) (s Stat, err error) {
scanner := bufio.NewScanner(bytes.NewReader(d))
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
fds := strings.Fields(txt)
for i, fv := range fds {
column := schema.ToField(StatSchema.Columns[i].Name)
val := reflect.ValueOf(&s).Elem()
if val.Kind() == reflect.Struct {
f := val.FieldByName(column)
if f.IsValid() {
if f.CanSet() {
switch StatSchema.Columns[i].Kind {
case reflect.Uint64:
uv, uerr := strconv.ParseUint(fv, 10, 64)
if uerr != nil {
return Stat{}, fmt.Errorf("%v when parsing %s %v", uerr, column, fv)
}
if !f.OverflowUint(uv) {
f.SetUint(uv)
fval := val.FieldByName(column + "BytesN")
if fval.IsValid() {
if fval.CanSet() {
fval.SetUint(uv)
}
}
if vv, ok := StatSchema.ColumnsToParse[StatSchema.Columns[i].Name]; ok {
switch vv {
case schema.TypeBytes:
hF := val.FieldByName(column + "ParsedBytes")
if hF.IsValid() {
if hF.CanSet() {
hF.SetString(humanize.Bytes(uv))
}
}
}
}
}
case reflect.Int64:
iv, ierr := strconv.ParseInt(fv, 10, 64)
if ierr != nil {
return Stat{}, fmt.Errorf("%v when parsing %s %v", ierr, column, fv)
}
if !f.OverflowInt(iv) {
f.SetInt(iv)
fval := val.FieldByName(column + "BytesN")
if fval.IsValid() {
if fval.CanSet() {
fval.SetInt(iv)
}
}
if vv, ok := StatSchema.ColumnsToParse[StatSchema.Columns[i].Name]; ok {
switch vv {
case schema.TypeBytes:
fval := val.FieldByName(column + "ParsedBytes")
if fval.IsValid() {
if fval.CanSet() {
fval.SetString(humanize.Bytes(uint64(iv)))
}
}
}
}
}
case reflect.String:
f.SetString(fv)
if vv, ok := StatSchema.ColumnsToParse[StatSchema.Columns[i].Name]; ok {
switch vv {
case schema.TypeStatus:
fval := val.FieldByName(column + "ParsedStatus")
if fval.IsValid() {
if fval.CanSet() {
fval.SetString(convertStatus(fv))
}
}
}
}
}
}
}
}
}
}
if err = scanner.Err(); err != nil {
return s, err
}
if strings.HasPrefix(s.Comm, "(") {
s.Comm = s.Comm[1:]
}
if strings.HasSuffix(s.Comm, ")") {
s.Comm = s.Comm[:len(s.Comm)-1]
}
return s, err
}
const statTmpl = `
----------------------------------------
[/proc/{{.Pid}}/stat]
Name: {{.Comm}}
State: {{.StateParsedStatus}}
Pid: {{.Pid}}
Ppid: {{.Ppid}}
NumThreads: {{.NumThreads}}
Rss: {{.RssParsedBytes}} ({{.RssBytesN}})
Rsslim: {{.RsslimParsedBytes}} ({{.RsslimBytesN}})
Vsize: {{.VsizeParsedBytes}} ({{.VsizeBytesN}})
Starttime: {{.Starttime}}
Utime: {{.Utime}}
Stime: {{.Stime}}
Cutime: {{.Cutime}}
Cstime: {{.Cstime}}
Session: {{.Session}}
TtyNr: {{.TtyNr}}
Tpgid: {{.Tpgid}}
Flags: {{.Flags}}
minflt: {{.Minflt}}
cminflt: {{.Cminflt}}
majflt: {{.Majflt}}
cmajflt: {{.Cmajflt}}
priority: {{.Priority}}
nice: {{.Nice}}
itrealvalue: {{.Itrealvalue}}
startcode: {{.Startcode}}
endcode: {{.Endcode}}
startstack: {{.Startstack}}
kstkesp: {{.Kstkesp}}
kstkeip: {{.Kstkeip}}
signal: {{.Signal}}
blocked: {{.Blocked}}
sigignore: {{.Sigignore}}
sigcatch: {{.Sigcatch}}
wchan: {{.Wchan}}
nswap: {{.Nswap}}
cnswap: {{.Cnswap}}
exitSignal: {{.ExitSignal}}
processor: {{.Processor}}
rt_priority: {{.RtPriority}}
policy: {{.Policy}}
delayacct_blkio_ticks:
{{.DelayacctBlkioTicks}}
guest_time: {{.GuestTime}}
cguest_time: {{.CguestTime}}
start_data: {{.StartData}}
end_data: {{.EndData}}
start_brk: {{.StartBrk}}
arg_start: {{.ArgStart}}
arg_end: {{.ArgEnd}}
env_start: {{.EnvStart}}
env_end: {{.EnvEnd}}
exit_code: {{.ExitCode}}
----------------------------------------
`
func (s Stat) String() string {
tpl := template.Must(template.New("statTmpl").Parse(statTmpl))
buf := new(bytes.Buffer)
if err := tpl.Execute(buf, s); err != nil {
log.Fatal(err)
}
return buf.String()
}
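
A minimal usage sketch (not part of the vendored code) for GetStatByPID and the Stat template above:

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/gyuho/linux-inspect/proc"
	)

	func main() {
		s, err := proc.GetStatByPID(int64(os.Getpid()))
		if err != nil {
			log.Fatal(err)
		}
		// String renders the statTmpl shown above
		fmt.Println(s.String())
		fmt.Println("rss:", s.RssParsedBytes)
	}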

174
vendor/github.com/gyuho/linux-inspect/proc/status.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
package proc
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"strings"
"text/template"
"github.com/gyuho/linux-inspect/pkg/fileutil"
"github.com/dustin/go-humanize"
"gopkg.in/yaml.v2"
)
// GetStatusByPID reads '/proc/$PID/status' data.
func GetStatusByPID(pid int64) (s Status, err error) {
d, derr := readStatus(pid)
if derr != nil {
return Status{}, derr
}
s, err = parseStatus(d)
if err != nil {
return s, err
}
s.StateParsedStatus = strings.TrimSpace(s.State)
u, _ := humanize.ParseBytes(s.VmPeak)
s.VmPeakBytesN = u
s.VmPeakParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmSize)
s.VmSizeBytesN = u
s.VmSizeParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmLck)
s.VmLckBytesN = u
s.VmLckParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmPin)
s.VmPinBytesN = u
s.VmPinParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmHWM)
s.VmHWMBytesN = u
s.VmHWMParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmRSS)
s.VmRSSBytesN = u
s.VmRSSParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmData)
s.VmDataBytesN = u
s.VmDataParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmStk)
s.VmStkBytesN = u
s.VmStkParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmExe)
s.VmExeBytesN = u
s.VmExeParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmLib)
s.VmLibBytesN = u
s.VmLibParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmPTE)
s.VmPTEBytesN = u
s.VmPTEParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmPMD)
s.VmPMDBytesN = u
s.VmPMDParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.VmSwap)
s.VmSwapBytesN = u
s.VmSwapParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(s.HugetlbPages)
s.HugetlbPagesBytesN = u
s.HugetlbPagesParsedBytes = humanize.Bytes(u)
return s, nil
}
func readStatus(pid int64) ([]byte, error) {
fpath := fmt.Sprintf("/proc/%d/status", pid)
f, err := fileutil.OpenToRead(fpath)
if err != nil {
return nil, err
}
// close only after the contents have been read
defer f.Close()
return ioutil.ReadAll(f)
}
func parseStatus(d []byte) (s Status, err error) {
err = yaml.Unmarshal(d, &s)
return s, err
}
const statusTmpl = `
----------------------------------------
[/proc/{{.Pid}}/status]
Name: {{.Name}}
Umask: {{.Umask}}
State: {{.StateParsedStatus}}
Tgid: {{.Tgid}}
Ngid: {{.Ngid}}
Pid: {{.Pid}}
PPid: {{.PPid}}
TracerPid: {{.TracerPid}}
FDSize: {{.FDSize}}
VmPeak: {{.VmPeakParsedBytes}}
VmSize: {{.VmSizeParsedBytes}}
VmLck: {{.VmLckParsedBytes}}
VmPin: {{.VmPinParsedBytes}}
VmHWM: {{.VmHWMParsedBytes}}
VmRSS: {{.VmRSSParsedBytes}}
VmData: {{.VmDataParsedBytes}}
VmStk: {{.VmStkParsedBytes}}
VmExe: {{.VmExeParsedBytes}}
VmLib: {{.VmLibParsedBytes}}
VmPTE: {{.VmPTEParsedBytes}}
VmPMD: {{.VmPMDParsedBytes}}
VmSwap: {{.VmSwapParsedBytes}}
HugetlbPages: {{.HugetlbPagesParsedBytes}}
Threads: {{.Threads}}
Groups: {{.Groups}}
Uid: {{.Uid}}
Gid: {{.Gid}}
SigQ: {{.SigQ}}
SigPnd: {{.SigPnd}}
ShdPnd: {{.ShdPnd}}
SigBlk: {{.SigBlk}}
SigIgn: {{.SigIgn}}
SigCgt: {{.SigCgt}}
CapInh: {{.CapInh}}
CapPrm: {{.CapPrm}}
CapEff: {{.CapEff}}
CapBnd: {{.CapBnd}}
Seccomp: {{.Seccomp}}
Cpus_allowed: {{.CpusAllowed}}
Cpus_allowed_list: {{.CpusAllowedList}}
Mems_allowed: {{.MemsAllowed}}
Mems_allowed_list: {{.MemsAllowedList}}
voluntary_ctxt_switches:
{{.VoluntaryCtxtSwitches}}
nonvoluntary_ctxt_switches:
{{.NonvoluntaryCtxtSwitches}}
----------------------------------------
`
func (s Status) String() string {
tpl := template.Must(template.New("statusTmpl").Parse(statusTmpl))
buf := new(bytes.Buffer)
if err := tpl.Execute(buf, s); err != nil {
log.Fatal(err)
}
return buf.String()
}
// GetProgram returns the program name.
func GetProgram(pid int64) (string, error) {
// Readlink needs root permission
// return os.Readlink(fmt.Sprintf("/proc/%d/exe", pid))
s, err := GetStatusByPID(pid)
return s.Name, err
}
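
A minimal usage sketch (not part of the vendored code) for GetStatusByPID and GetProgram as defined above:

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/gyuho/linux-inspect/proc"
	)

	func main() {
		pid := int64(os.Getpid())

		// GetProgram resolves the program name via /proc/$PID/status (no root needed)
		name, err := proc.GetProgram(pid)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("program:", name)

		st, err := proc.GetStatusByPID(pid)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("VmRSS:", st.VmRSSParsedBytes)
	}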

View File

@ -1,14 +1,17 @@
-package psn
+package proc

 import (
 	"io/ioutil"
 	"strconv"
 	"strings"
+
+	"github.com/gyuho/linux-inspect/pkg/fileutil"
+	"github.com/gyuho/linux-inspect/pkg/timeutil"
 )

-// GetProcUptime reads '/proc/uptime'.
-func GetProcUptime() (Uptime, error) {
-	f, err := openToRead("/proc/uptime")
+// GetUptime reads '/proc/uptime'.
+func GetUptime() (Uptime, error) {
+	f, err := fileutil.OpenToRead("/proc/uptime")
 	if err != nil {
 		return Uptime{}, err
 	}
@ -27,7 +30,7 @@ func GetProcUptime() (Uptime, error) {
 		return Uptime{}, err
 	}
 	u.UptimeTotal = v
-	u.UptimeTotalParsedTime = humanizeDurationSecond(uint64(v))
+	u.UptimeTotalParsedTime = timeutil.HumanizeDurationSecond(uint64(v))
 	}
 	if len(fields) > 1 {
 		v, err := strconv.ParseFloat(fields[1], 64)
@ -35,7 +38,7 @@ func GetProcUptime() (Uptime, error) {
 		return Uptime{}, err
 	}
 	u.UptimeIdle = v
-	u.UptimeIdleParsedTime = humanizeDurationSecond(uint64(v))
+	u.UptimeIdleParsedTime = timeutil.HumanizeDurationSecond(uint64(v))
 	}
 	return u, nil
 }

35
vendor/github.com/gyuho/linux-inspect/proc/utils.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
package proc
import (
"fmt"
"strconv"
"strings"
)
func convertStatus(s string) string {
ns := strings.TrimSpace(s)
if len(ns) > 1 {
ns = ns[:1]
}
switch ns {
case "D":
return "D (uninterruptible sleep)"
case "R":
return "R (running)"
case "S":
return "S (sleeping)"
case "T":
return "T (stopped by job control signal)"
case "t":
return "t (stopped by debugger during trace)"
case "Z":
return "Z (zombie)"
default:
return fmt.Sprintf("unknown process %q", s)
}
}
func isInt(s string) bool {
_, err := strconv.Atoi(s)
return err == nil
}
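
A small in-package test sketch (not part of the vendored code) exercising convertStatus; the expected strings follow the switch above:

	package proc

	import "testing"

	func TestConvertStatus(t *testing.T) {
		cases := map[string]string{
			"R":  "R (running)",
			"S ": "S (sleeping)", // surrounding whitespace is trimmed before matching
			"Z":  "Z (zombie)",
		}
		for in, want := range cases {
			if got := convertStatus(in); got != want {
				t.Fatalf("convertStatus(%q) = %q, want %q", in, got, want)
			}
		}
	}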

View File

@ -1,317 +0,0 @@
package psn
import (
"bufio"
"bytes"
"fmt"
"io"
"os/exec"
"path/filepath"
"reflect"
"strconv"
"strings"
humanize "github.com/dustin/go-humanize"
)
// GetDf returns entries in 'df' command.
// Pass '' target to list all information.
func GetDf(dfPath string, target string) ([]DfCommandRow, error) {
o, err := ReadDf(dfPath, target)
if err != nil {
return nil, err
}
return ParseDfOutput(o)
}
// GetDfDefault returns entries in 'df' command.
// Pass '' target to list all information.
func GetDfDefault(target string) ([]DfCommandRow, error) {
o, err := ReadDf(DefaultDfPath, target)
if err != nil {
return nil, err
}
return ParseDfOutput(o)
}
// DefaultDfPath is the default 'df' command path.
var DefaultDfPath = "/bin/df"
// DfFlags is 'df --all --sync --block-size=1024 --output=source,target,fstype,file,itotal,iavail,iused,ipcent,size,avail,used,pcent'.
var DfFlags = []string{"--all", "--sync", "--block-size=1024", "--output=source,target,fstype,file,itotal,iavail,iused,ipcent,size,avail,used,pcent"}
// ReadDfDefault reads Linux 'df' command output.
// Pass '' target to list all information.
func ReadDfDefault(target string) (string, error) {
return ReadDf(DefaultDfPath, target)
}
// ReadDf reads Linux 'df' command output.
// Pass '' target to list all information.
func ReadDf(dfPath string, target string) (string, error) {
buf := new(bytes.Buffer)
err := readDf(dfPath, target, buf)
o := strings.TrimSpace(buf.String())
return o, err
}
func readDf(dfPath string, target string, w io.Writer) error {
if !exist(dfPath) {
return fmt.Errorf("%q does not exist", dfPath)
}
if target != "" {
DfFlags = append(DfFlags, strings.TrimSpace(target))
}
cmd := exec.Command(dfPath, DfFlags...)
cmd.Stdout = w
cmd.Stderr = w
return cmd.Run()
}
// DfRowHeaders is the headers in 'df' output.
var DfRowHeaders = []string{
"Filesystem",
// Mounted on
"Mounted",
"on",
"Type",
"File",
"Inodes",
"IFree",
"IUsed",
"IUse%",
"1K-blocks",
"Avail",
"Used",
"Use%",
}
type dfCommandOutpudrowIdx int
const (
df_command_output_row_idx_file_system dfCommandOutpudrowIdx = iota
df_command_output_row_idx_mounted_on
df_command_output_row_idx_file_system_type
df_command_output_row_idx_file
df_command_output_row_idx_inodes
df_command_output_row_idx_ifree
df_command_output_row_idx_iused
df_command_output_row_idx_iused_percent
df_command_output_row_idx_total_blocks
df_command_output_row_idx_available_blocks
df_command_output_row_idx_used_blocks
df_command_output_row_idx_used_blocks_percentage
)
// ParseDfOutput parses 'df' command output and returns the rows.
func ParseDfOutput(s string) ([]DfCommandRow, error) {
lines := strings.Split(s, "\n")
rows := make([][]string, 0, len(lines))
headerFound := false
for _, line := range lines {
if len(line) == 0 {
continue
}
ds := strings.Fields(strings.TrimSpace(line))
if ds[0] == "Filesystem" { // header line
if !reflect.DeepEqual(ds, DfRowHeaders) {
return nil, fmt.Errorf("unexpected 'df' command header order (%v, expected %v, output: %q)", ds, DfRowHeaders, s)
}
headerFound = true
continue
}
if !headerFound {
continue
}
row := strings.Fields(strings.TrimSpace(line))
if len(row) != len(DfRowHeaders)-1 {
return nil, fmt.Errorf("unexpected row column number %v (expected %v)", row, DfRowHeaders)
}
rows = append(rows, row)
}
type result struct {
row DfCommandRow
err error
}
rc := make(chan result, len(rows))
for _, row := range rows {
go func(row []string) {
tr, err := parseDfRow(row)
rc <- result{row: tr, err: err}
}(row)
}
tcRows := make([]DfCommandRow, 0, len(rows))
for len(tcRows) != len(rows) {
select {
case rs := <-rc:
if rs.err != nil {
return nil, rs.err
}
tcRows = append(tcRows, rs.row)
}
}
rm := make(map[string]DfCommandRow)
for _, row := range tcRows {
rm[row.MountedOn] = row
}
rrs := make([]DfCommandRow, 0, len(rm))
for _, row := range rm {
rrs = append(rrs, row)
}
return rrs, nil
}
func parseDfRow(row []string) (DfCommandRow, error) {
drow := DfCommandRow{
FileSystem: strings.TrimSpace(row[df_command_output_row_idx_file_system]),
MountedOn: strings.TrimSpace(row[df_command_output_row_idx_mounted_on]),
FileSystemType: strings.TrimSpace(row[df_command_output_row_idx_file_system_type]),
File: strings.TrimSpace(row[df_command_output_row_idx_file]),
IusedPercent: strings.TrimSpace(strings.Replace(row[df_command_output_row_idx_iused_percent], "%", " %", -1)),
UsedBlocksPercent: strings.TrimSpace(strings.Replace(row[df_command_output_row_idx_used_blocks_percentage], "%", " %", -1)),
}
drow.Device = filepath.Base(drow.FileSystem)
ptxt := strings.TrimSpace(row[df_command_output_row_idx_inodes])
if ptxt == "-" {
ptxt = "0"
}
iv, err := strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return DfCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.Inodes = iv
ptxt = strings.TrimSpace(row[df_command_output_row_idx_ifree])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return DfCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.Ifree = iv
ptxt = strings.TrimSpace(row[df_command_output_row_idx_iused])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return DfCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.Iused = iv
ptxt = strings.TrimSpace(row[df_command_output_row_idx_total_blocks])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return DfCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.TotalBlocks = iv
drow.TotalBlocksBytesN = iv * 1024
drow.TotalBlocksParsedBytes = humanize.Bytes(uint64(drow.TotalBlocksBytesN))
ptxt = strings.TrimSpace(row[df_command_output_row_idx_available_blocks])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return DfCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.AvailableBlocks = iv
drow.AvailableBlocksBytesN = iv * 1024
drow.AvailableBlocksParsedBytes = humanize.Bytes(uint64(drow.AvailableBlocksBytesN))
ptxt = strings.TrimSpace(row[df_command_output_row_idx_used_blocks])
if ptxt == "-" {
ptxt = "0"
}
iv, err = strconv.ParseInt(ptxt, 10, 64)
if err != nil {
return DfCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
drow.UsedBlocks = iv
drow.UsedBlocksBytesN = iv * 1024
drow.UsedBlocksParsedBytes = humanize.Bytes(uint64(drow.UsedBlocksBytesN))
return drow, nil
}
type etcMtabColumnIndex int
const (
etc_mtab_idx_file_system etcMtabColumnIndex = iota
etc_mtab_idx_mounted_on
etc_mtab_idx_file_system_type
etc_mtab_idx_options
etc_mtab_idx_dump
etc_mtab_idx_pass
)
// GetEtcMtab returns '/etc/mtab' information.
func GetEtcMtab() ([]Mtab, error) {
f, err := openToRead("/etc/mtab")
if err != nil {
return nil, err
}
defer f.Close()
mss := []Mtab{}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
ms := strings.Fields(strings.TrimSpace(txt))
if len(ms) < int(etc_mtab_idx_pass+1) {
return nil, fmt.Errorf("not enough columns at %v", ms)
}
m := Mtab{
FileSystem: strings.TrimSpace(ms[etc_mtab_idx_file_system]),
MountedOn: strings.TrimSpace(ms[etc_mtab_idx_mounted_on]),
FileSystemType: strings.TrimSpace(ms[etc_mtab_idx_file_system_type]),
Options: strings.TrimSpace(ms[etc_mtab_idx_options]),
}
mn, err := strconv.ParseInt(ms[etc_mtab_idx_dump], 10, 64)
if err != nil {
return nil, err
}
m.Dump = int(mn)
mn, err = strconv.ParseInt(ms[etc_mtab_idx_dump], 10, 64)
if err != nil {
return nil, err
}
m.Pass = int(mn)
mss = append(mss, m)
}
return mss, nil
}
// GetDevice returns the device name where dir is mounted.
func GetDevice(target string) (string, error) {
drows, err := GetDfDefault(target)
if err != nil {
return "", err
}
if len(drows) != 1 {
return "", fmt.Errorf("expected 1 df row at %q (got %+v)", target, drows)
}
return drows[0].Device, nil
}

View File

@ -1,3 +0,0 @@
// Package psn provides utilities to programmatically
// inspect Linux processes, sockets (ps, ss, netstat).
package psn

View File

@ -1,132 +0,0 @@
package psn
import (
"fmt"
"strings"
)
// EntryFilter defines entry filter.
type EntryFilter struct {
ProgramMatchFunc func(string) bool
program string
PID int64
TopLimit int
// for ss
TCP bool
TCP6 bool
LocalPort int64
RemotePort int64
// for ps
TopCommandPath string
TopStream *TopStream
// for Proc
DiskDevice string
NetworkInterface string
ExtraPath string
}
// FilterFunc applies each filter.
type FilterFunc func(*EntryFilter)
// WithProgramMatch matches command name.
func WithProgramMatch(matchFunc func(string) bool) FilterFunc {
return func(ft *EntryFilter) { ft.ProgramMatchFunc = matchFunc }
}
// WithProgram to filter entries by program name.
func WithProgram(name string) FilterFunc {
return func(ft *EntryFilter) {
ft.ProgramMatchFunc = func(commandName string) bool {
return strings.HasSuffix(commandName, name)
}
ft.program = name
}
}
// WithPID to filter entries by PIDs.
func WithPID(pid int64) FilterFunc {
return func(ft *EntryFilter) { ft.PID = pid }
}
// WithTopLimit to filter entries with limit.
func WithTopLimit(limit int) FilterFunc {
return func(ft *EntryFilter) { ft.TopLimit = limit }
}
// WithLocalPort to filter entries by local port.
func WithLocalPort(port int64) FilterFunc {
return func(ft *EntryFilter) { ft.LocalPort = port }
}
// WithRemotePort to filter entries by remote port.
func WithRemotePort(port int64) FilterFunc {
return func(ft *EntryFilter) { ft.RemotePort = port }
}
// WithTCP to filter entries by TCP.
// Can be used with 'WithTCP6'.
func WithTCP() FilterFunc {
return func(ft *EntryFilter) { ft.TCP = true }
}
// WithTCP6 to filter entries by TCP6.
// Can be used with 'WithTCP'.
func WithTCP6() FilterFunc {
return func(ft *EntryFilter) { ft.TCP6 = true }
}
// WithTopCommandPath configures 'top' command path.
func WithTopCommandPath(path string) FilterFunc {
return func(ft *EntryFilter) { ft.TopCommandPath = path }
}
// WithTopStream gets the PSEntry from the 'top' stream.
func WithTopStream(str *TopStream) FilterFunc {
return func(ft *EntryFilter) { ft.TopStream = str }
}
// WithDiskDevice to filter entries by disk device.
func WithDiskDevice(name string) FilterFunc {
return func(ft *EntryFilter) { ft.DiskDevice = name }
}
// WithNetworkInterface to filter entries by disk device.
func WithNetworkInterface(name string) FilterFunc {
return func(ft *EntryFilter) { ft.NetworkInterface = name }
}
// WithExtraPath to filter entries by disk device.
func WithExtraPath(path string) FilterFunc {
return func(ft *EntryFilter) { ft.ExtraPath = path }
}
// applyOpts panics when ft.Program != "" && ft.PID > 0.
func (ft *EntryFilter) applyOpts(opts []FilterFunc) {
for _, opt := range opts {
opt(ft)
}
if ft.DiskDevice != "" || ft.NetworkInterface != "" || ft.ExtraPath != "" {
if (ft.program != "" || ft.ProgramMatchFunc != nil) || ft.TopLimit > 0 || ft.LocalPort > 0 || ft.RemotePort > 0 || ft.TCP || ft.TCP6 {
panic(fmt.Errorf("not-valid Proc fileter; disk device %q or network interface %q or extra path %q", ft.DiskDevice, ft.NetworkInterface, ft.ExtraPath))
}
}
if (ft.program != "" || ft.ProgramMatchFunc != nil) && ft.PID > 0 {
panic(fmt.Errorf("can't filter both by program(%q or %p) and PID(%d)", ft.program, ft.ProgramMatchFunc, ft.PID))
}
if !ft.TCP && !ft.TCP6 {
// choose both
ft.TCP, ft.TCP6 = true, true
}
if ft.LocalPort > 0 && ft.RemotePort > 0 {
panic(fmt.Errorf("can't query by both local(%d) and remote(%d) ports", ft.LocalPort, ft.RemotePort))
}
if ft.TopCommandPath == "" {
ft.TopCommandPath = DefaultTopPath
}
}

View File

@ -1,141 +0,0 @@
package psn
import (
"bufio"
"fmt"
"strconv"
"strings"
)
type procDiskstatsColumnIndex int
const (
proc_diskstats_idx_major_number procDiskstatsColumnIndex = iota
proc_diskstats_idx_minor_number
proc_diskstats_idx_device_name
proc_diskstats_idx_reads_completed
proc_diskstats_idx_reads_merged
proc_diskstats_idx_sectors_read
proc_diskstats_idx_time_spent_on_reading_ms
proc_diskstats_idx_writes_completed
proc_diskstats_idx_writes_merged
proc_diskstats_idx_sectors_written
proc_diskstats_idx_time_spent_on_writing_ms
proc_diskstats_idx_ios_in_progress
proc_diskstats_idx_time_spent_on_ios_ms
proc_diskstats_idx_weighted_time_spent_on_ios_ms
)
// GetProcDiskstats reads '/proc/diskstats'.
func GetProcDiskstats() ([]DiskStat, error) {
f, err := openToRead("/proc/diskstats")
if err != nil {
return nil, err
}
defer f.Close()
dss := []DiskStat{}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
ds := strings.Fields(strings.TrimSpace(txt))
if len(ds) < int(proc_diskstats_idx_weighted_time_spent_on_ios_ms+1) {
return nil, fmt.Errorf("not enough columns at %v", ds)
}
d := DiskStat{}
mn, err := strconv.ParseUint(ds[proc_diskstats_idx_major_number], 10, 64)
if err != nil {
return nil, err
}
d.MajorNumber = mn
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_minor_number], 10, 64)
if err != nil {
return nil, err
}
d.MinorNumber = mn
d.DeviceName = strings.TrimSpace(ds[proc_diskstats_idx_device_name])
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_reads_completed], 10, 64)
if err != nil {
return nil, err
}
d.ReadsCompleted = mn
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_reads_merged], 10, 64)
if err != nil {
return nil, err
}
d.ReadsMerged = mn
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_sectors_read], 10, 64)
if err != nil {
return nil, err
}
d.SectorsRead = mn
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_time_spent_on_reading_ms], 10, 64)
if err != nil {
return nil, err
}
d.TimeSpentOnReadingMs = mn
d.TimeSpentOnReadingMsParsedTime = humanizeDurationMs(mn)
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_writes_completed], 10, 64)
if err != nil {
return nil, err
}
d.WritesCompleted = mn
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_writes_merged], 10, 64)
if err != nil {
return nil, err
}
d.WritesMerged = mn
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_sectors_written], 10, 64)
if err != nil {
return nil, err
}
d.SectorsWritten = mn
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_time_spent_on_writing_ms], 10, 64)
if err != nil {
return nil, err
}
d.TimeSpentOnWritingMs = mn
d.TimeSpentOnWritingMsParsedTime = humanizeDurationMs(mn)
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_ios_in_progress], 10, 64)
if err != nil {
return nil, err
}
d.IOsInProgress = mn
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_time_spent_on_ios_ms], 10, 64)
if err != nil {
return nil, err
}
d.TimeSpentOnIOsMs = mn
d.TimeSpentOnIOsMsParsedTime = humanizeDurationMs(mn)
mn, err = strconv.ParseUint(ds[proc_diskstats_idx_weighted_time_spent_on_ios_ms], 10, 64)
if err != nil {
return nil, err
}
d.WeightedTimeSpentOnIOsMs = mn
d.WeightedTimeSpentOnIOsMsParsedTime = humanizeDurationMs(mn)
dss = append(dss, d)
}
return dss, nil
}

View File

@ -1,169 +0,0 @@
package psn
import (
"bufio"
"fmt"
"strconv"
"strings"
humanize "github.com/dustin/go-humanize"
)
type procNetDevColumnIndex int
const (
proc_net_dev_idx_interface procNetDevColumnIndex = iota
proc_net_dev_idx_receive_bytes
proc_net_dev_idx_receive_packets
proc_net_dev_idx_receive_errs
proc_net_dev_idx_receive_drop
proc_net_dev_idx_receive_fifo
proc_net_dev_idx_receive_frame
proc_net_dev_idx_receive_compressed
proc_net_dev_idx_receive_multicast
proc_net_dev_idx_transmit_bytes
proc_net_dev_idx_transmit_packets
proc_net_dev_idx_transmit_errs
proc_net_dev_idx_transmit_drop
proc_net_dev_idx_transmit_fifo
proc_net_dev_idx_transmit_colls
proc_net_dev_idx_transmit_carrier
)
// GetProcNetDev reads '/proc/net/dev'.
func GetProcNetDev() ([]NetDev, error) {
f, err := openToRead("/proc/net/dev")
if err != nil {
return nil, err
}
defer f.Close()
header := true
dss := []NetDev{}
scanner := bufio.NewScanner(f)
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
ds := strings.Fields(strings.TrimSpace(txt))
if header {
if strings.HasPrefix(ds[0], "Inter") {
continue
}
if strings.HasSuffix(ds[0], "face") {
header = false
continue
}
}
if len(ds) < int(proc_net_dev_idx_transmit_carrier+1) {
return nil, fmt.Errorf("not enough columns at %v", ds)
}
d := NetDev{}
d.Interface = strings.TrimSpace(ds[proc_net_dev_idx_interface])
d.Interface = d.Interface[:len(d.Interface)-1] // remove ':' from 'wlp2s0:'
mn, err := strconv.ParseUint(ds[proc_net_dev_idx_receive_bytes], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveBytes = mn
d.ReceiveBytesBytesN = mn
d.ReceiveBytesParsedBytes = humanize.Bytes(mn)
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_transmit_bytes], 10, 64)
if err != nil {
return nil, err
}
d.TransmitBytes = mn
d.TransmitBytesBytesN = mn
d.TransmitBytesParsedBytes = humanize.Bytes(mn)
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_receive_packets], 10, 64)
if err != nil {
return nil, err
}
d.ReceivePackets = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_receive_errs], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveErrs = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_receive_drop], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveDrop = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_receive_fifo], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveFifo = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_receive_frame], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveFrame = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_receive_compressed], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveCompressed = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_receive_multicast], 10, 64)
if err != nil {
return nil, err
}
d.ReceiveMulticast = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_transmit_packets], 10, 64)
if err != nil {
return nil, err
}
d.TransmitPackets = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_transmit_errs], 10, 64)
if err != nil {
return nil, err
}
d.TransmitErrs = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_transmit_drop], 10, 64)
if err != nil {
return nil, err
}
d.TransmitDrop = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_transmit_fifo], 10, 64)
if err != nil {
return nil, err
}
d.TransmitFifo = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_transmit_colls], 10, 64)
if err != nil {
return nil, err
}
d.TransmitColls = mn
mn, err = strconv.ParseUint(ds[proc_net_dev_idx_transmit_carrier], 10, 64)
if err != nil {
return nil, err
}
d.TransmitCarrier = mn
dss = append(dss, d)
}
return dss, nil
}

View File

@ -1,243 +0,0 @@
package psn
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"sync"
)
// TransportProtocol is tcp, tcp6.
type TransportProtocol int
const (
TypeTCP TransportProtocol = iota
TypeTCP6
)
func (tp TransportProtocol) String() string {
switch tp {
case TypeTCP:
return "tcp"
case TypeTCP6:
return "tcp6"
default:
panic(fmt.Errorf("unknown transport protocol %d", tp))
}
}
// GetProcNetTCPByPID reads '/proc/$PID/net/tcp(6)' data.
func GetProcNetTCPByPID(pid int64, tp TransportProtocol) (ss []NetTCP, err error) {
return parseProcNetTCPByPID(pid, tp)
}
type procNetColumnIndex int
const (
proc_net_tcp_idx_sl procNetColumnIndex = iota
proc_net_tcp_idx_local_address
proc_net_tcp_idx_remote_address
proc_net_tcp_idx_st
proc_net_tcp_idx_tx_queue_rx_queue
proc_net_tcp_idx_tr_tm_when
proc_net_tcp_idx_retrnsmt
proc_net_tcp_idx_uid
proc_net_tcp_idx_timeout
proc_net_tcp_idx_inode
)
var (
// RPC_SHOW_SOCK
// https://github.com/torvalds/linux/blob/master/include/trace/events/sunrpc.h
netTCPStatus = map[string]string{
"01": "ESTABLISHED",
"02": "SYN_SENT",
"03": "SYN_RECV",
"04": "FIN_WAIT1",
"05": "FIN_WAIT2",
"06": "TIME_WAIT",
"07": "CLOSE",
"08": "CLOSE_WAIT",
"09": "LAST_ACK",
"0A": "LISTEN",
"0B": "CLOSING",
}
)
func parseProcNetTCPByPID(pid int64, tp TransportProtocol) ([]NetTCP, error) {
fpath := fmt.Sprintf("/proc/%d/net/%s", pid, tp.String())
f, err := openToRead(fpath)
if err != nil {
return nil, err
}
defer f.Close()
rows := [][]string{}
first := true
scanner := bufio.NewScanner(f)
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
fs := strings.Fields(txt)
if len(fs) < int(proc_net_tcp_idx_inode+1) {
return nil, fmt.Errorf("not enough columns at %v", fs)
}
if first {
if fs[0] != "sl" { // header
return nil, fmt.Errorf("first line must be columns but got = %#q", fs)
}
first = false
continue
}
row := make([]string, 10)
copy(row, fs[:proc_net_tcp_idx_inode+1])
rows = append(rows, row)
}
if err := scanner.Err(); err != nil {
return nil, err
}
var ipParse func(string) (string, int64, error)
switch tp {
case TypeTCP:
ipParse = parseLittleEndianIpv4
case TypeTCP6:
ipParse = parseLittleEndianIpv6
}
nch, errc := make(chan NetTCP), make(chan error)
for _, row := range rows {
go func(row []string) {
ns := NetTCP{}
ns.Type = tp.String()
sn, err := strconv.ParseUint(strings.Replace(row[proc_net_tcp_idx_sl], ":", "", -1), 10, 64)
if err != nil {
errc <- err
return
}
ns.Sl = sn
ns.LocalAddress = strings.TrimSpace(row[proc_net_tcp_idx_local_address])
lp, lt, err := ipParse(row[proc_net_tcp_idx_local_address])
if err != nil {
errc <- err
return
}
ns.LocalAddressParsedIPHost = strings.TrimSpace(lp)
ns.LocalAddressParsedIPPort = lt
ns.RemAddress = strings.TrimSpace(row[proc_net_tcp_idx_remote_address])
rp, rt, err := ipParse(row[proc_net_tcp_idx_remote_address])
if err != nil {
errc <- err
return
}
ns.RemAddressParsedIPHost = strings.TrimSpace(rp)
ns.RemAddressParsedIPPort = rt
ns.St = strings.TrimSpace(row[proc_net_tcp_idx_st])
ns.StParsedStatus = strings.TrimSpace(netTCPStatus[row[proc_net_tcp_idx_st]])
qs := strings.Split(row[proc_net_tcp_idx_tx_queue_rx_queue], ":")
if len(qs) == 2 {
ns.TxQueue = qs[0]
ns.RxQueue = qs[1]
}
trs := strings.Split(row[proc_net_tcp_idx_tr_tm_when], ":")
if len(trs) == 2 {
ns.Tr = trs[0]
ns.TmWhen = trs[1]
}
ns.Retrnsmt = row[proc_net_tcp_idx_retrnsmt]
un, err := strconv.ParseUint(row[proc_net_tcp_idx_uid], 10, 64)
if err != nil {
errc <- err
return
}
ns.Uid = un
to, err := strconv.ParseUint(row[proc_net_tcp_idx_timeout], 10, 64)
if err != nil {
errc <- err
return
}
ns.Timeout = to
ns.Inode = strings.TrimSpace(row[proc_net_tcp_idx_inode])
nch <- ns
}(row)
}
nss := make([]NetTCP, 0, len(rows))
cn, limit := 0, len(rows)
for cn != limit {
select {
case err := <-errc:
return nil, err
case p := <-nch:
nss = append(nss, p)
cn++
}
}
close(nch)
close(errc)
return nss, nil
}
// SearchInode finds the matching process to the given inode.
func SearchInode(fds []string, inode string) (pid int64) {
var mu sync.RWMutex
var wg sync.WaitGroup
wg.Add(len(fds))
for _, fd := range fds {
go func(fdpath string) {
defer wg.Done()
mu.RLock()
done := pid != 0
mu.RUnlock()
if done {
return
}
// '/proc/[pid]/fd' contains type:[inode]
sym, err := os.Readlink(fdpath)
if err != nil {
return
}
if !strings.Contains(strings.TrimSpace(sym), inode) {
return
}
pd, err := pidFromFd(fdpath)
if err != nil {
return
}
mu.Lock()
pid = pd
mu.Unlock()
}(fd)
}
wg.Wait()
if pid == 0 {
pid = -1
}
return
}
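SearchInode works because every entry under /proc/<pid>/fd is a symlink whose target names the object it refers to; for sockets the target typically looks like "socket:[<inode>]", so an inode taken from /proc/net/tcp can be matched back to its owning PID. Below is a minimal, self-contained sketch of that lookup, not part of this package: the glob pattern mirrors ListProcFds elsewhere in this commit, and the inode value is made up.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// findPIDByInode scans /proc/[pid]/fd/* symlinks for "socket:[<inode>]".
func findPIDByInode(inode string) (int64, error) {
	fds, err := filepath.Glob("/proc/[0-9]*/fd/[0-9]*")
	if err != nil {
		return -1, err
	}
	needle := "socket:[" + inode + "]"
	for _, fd := range fds {
		target, err := os.Readlink(fd)
		if err != nil {
			continue // process exited or permission denied
		}
		if strings.Contains(target, needle) {
			// fd path looks like /proc/1234/fd/5; index 2 is the PID
			return strconv.ParseInt(strings.Split(fd, "/")[2], 10, 64)
		}
	}
	return -1, fmt.Errorf("no process owns socket inode %s", inode)
}

func main() {
	pid, err := findPIDByInode("12345") // hypothetical inode taken from /proc/net/tcp
	fmt.Println(pid, err)
}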


@@ -1,244 +0,0 @@
package psn
import (
"bufio"
"bytes"
"fmt"
"html/template"
"log"
"os/exec"
"reflect"
"strconv"
"strings"
"github.com/dustin/go-humanize"
"github.com/gyuho/linux-inspect/psn/schema"
)
// GetProcStatByPID reads '/proc/$PID/stat' data.
func GetProcStatByPID(pid int64, up Uptime) (s Stat, err error) {
return parseProcStat(pid, up)
}
func parseProcStat(pid int64, up Uptime) (Stat, error) {
fpath := fmt.Sprintf("/proc/%d/stat", pid)
f, err := openToRead(fpath)
if err != nil {
return Stat{}, err
}
defer f.Close()
scanner := bufio.NewScanner(f)
st := &Stat{}
for scanner.Scan() {
txt := scanner.Text()
if len(txt) == 0 {
continue
}
fds := strings.Fields(txt)
for i, fv := range fds {
column := schema.ToField(schema.Stat.Columns[i].Name)
s := reflect.ValueOf(st).Elem()
if s.Kind() == reflect.Struct {
f := s.FieldByName(column)
if f.IsValid() {
if f.CanSet() {
switch schema.Stat.Columns[i].Kind {
case reflect.Uint64:
value, err := strconv.ParseUint(fv, 10, 64)
if err != nil {
return Stat{}, fmt.Errorf("%v when parsing %s %v", err, column, fv)
}
if !f.OverflowUint(value) {
f.SetUint(value)
bF := s.FieldByName(column + "BytesN")
if bF.IsValid() {
if bF.CanSet() {
bF.SetUint(value)
}
}
if vv, ok := schema.Stat.ColumnsToParse[schema.Stat.Columns[i].Name]; ok {
switch vv {
case schema.TypeBytes:
hF := s.FieldByName(column + "ParsedBytes")
if hF.IsValid() {
if hF.CanSet() {
hF.SetString(humanize.Bytes(value))
}
}
}
}
}
case reflect.Int64:
value, err := strconv.ParseInt(fv, 10, 64)
if err != nil {
return Stat{}, fmt.Errorf("%v when parsing %s %v", err, column, fv)
}
if !f.OverflowInt(value) {
f.SetInt(value)
bF := s.FieldByName(column + "BytesN")
if bF.IsValid() {
if bF.CanSet() {
bF.SetInt(value)
}
}
if vv, ok := schema.Stat.ColumnsToParse[schema.Stat.Columns[i].Name]; ok {
switch vv {
case schema.TypeBytes:
hF := s.FieldByName(column + "ParsedBytes")
if hF.IsValid() {
if hF.CanSet() {
hF.SetString(humanize.Bytes(uint64(value)))
}
}
}
}
}
case reflect.String:
f.SetString(fv)
if vv, ok := schema.Stat.ColumnsToParse[schema.Stat.Columns[i].Name]; ok {
switch vv {
case schema.TypeStatus:
hF := s.FieldByName(column + "ParsedStatus")
if hF.IsValid() {
if hF.CanSet() {
hF.SetString(convertProcStatus(fv))
}
}
}
}
}
}
}
}
}
}
if err := scanner.Err(); err != nil {
return Stat{}, err
}
return st.update(up)
}
func (s *Stat) update(up Uptime) (Stat, error) {
if s == nil {
return Stat{}, nil
}
if strings.HasPrefix(s.Comm, "(") {
s.Comm = s.Comm[1:]
}
if strings.HasSuffix(s.Comm, ")") {
s.Comm = s.Comm[:len(s.Comm)-1]
}
return *s, nil
}
const statTmpl = `
----------------------------------------
[/proc/{{.Pid}}/stat]
Name: {{.Comm}}
State: {{.StateParsedStatus}}
Pid: {{.Pid}}
Ppid: {{.Ppid}}
NumThreads: {{.NumThreads}}
Rss: {{.RssParsedBytes}} ({{.RssBytesN}})
Rsslim: {{.RsslimParsedBytes}} ({{.RsslimBytesN}})
Vsize: {{.VsizeParsedBytes}} ({{.VsizeBytesN}})
Starttime: {{.Starttime}}
Utime: {{.Utime}}
Stime: {{.Stime}}
Cutime: {{.Cutime}}
Cstime: {{.Cstime}}
Session: {{.Session}}
TtyNr: {{.TtyNr}}
Tpgid: {{.Tpgid}}
Flags: {{.Flags}}
minflt: {{.Minflt}}
cminflt: {{.Cminflt}}
majflt: {{.Majflt}}
cmajflt: {{.Cmajflt}}
priority: {{.Priority}}
nice: {{.Nice}}
itrealvalue: {{.Itrealvalue}}
startcode: {{.Startcode}}
endcode: {{.Endcode}}
startstack: {{.Startstack}}
lstkesp: {{.Kstkesp}}
lstkeip: {{.Kstkeip}}
signal: {{.Signal}}
blocked: {{.Blocked}}
sigignore: {{.Sigignore}}
sigcatch: {{.Sigcatch}}
wchan: {{.Wchan}}
nswap: {{.Nswap}}
cnswap: {{.Cnswap}}
exitSignal: {{.ExitSignal}}
processor: {{.Processor}}
rt_priority: {{.RtPriority}}
policy: {{.Policy}}
delayacct_blkio_ticks: {{.DelayacctBlkioTicks}}
guest_time: {{.GuestTime}}
cguest_time: {{.CguestTime}}
start_data: {{.StartData}}
end_data: {{.EndData}}
start_brk: {{.StartBrk}}
arg_start: {{.ArgStart}}
arg_end: {{.ArgEnd}}
env_start: {{.EnvStart}}
env_end: {{.EnvEnd}}
exit_code: {{.ExitCode}}
----------------------------------------
`
func (s Stat) String() string {
tpl := template.Must(template.New("statTmpl").Parse(statTmpl))
buf := new(bytes.Buffer)
if err := tpl.Execute(buf, s); err != nil {
log.Fatal(err)
}
return buf.String()
}
// GetCPUPercentage returns the average CPU usage in percentage.
// http://stackoverflow.com/questions/16726779/how-do-i-get-the-total-cpu-usage-of-an-application-from-proc-pid-stat
// This sometimes differ from the one in 'top' command.
// So do not use it!
func (s Stat) GetCPUPercentage(up Uptime) (float64, error) {
totalSec := s.Utime + s.Stime
totalSec += s.Cutime + s.Cstime
out, err := exec.Command("/usr/bin/getconf", "CLK_TCK").Output()
if err != nil {
return 0, err
}
ot := strings.TrimSpace(strings.Replace(string(out), "\n", "", -1))
hertz, err := strconv.ParseUint(ot, 10, 64)
if err != nil || hertz == 0 {
return 0, err
}
tookSec := up.UptimeTotal - (float64(s.Starttime) / float64(hertz))
if hertz == 0 || tookSec == 0.0 {
return 0.0, nil
}
return 100 * ((float64(totalSec) / float64(hertz)) / float64(tookSec)), nil
}
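The percentage above reduces to 100 * ((utime + stime + cutime + cstime) / Hz) / (uptime - starttime / Hz): CPU seconds consumed divided by seconds the process has been alive. A tiny worked example with made-up numbers, only to show the arithmetic:

package main

import "fmt"

func main() {
	// Hypothetical values, chosen only to illustrate GetCPUPercentage's arithmetic.
	const hertz = 100.0         // CLK_TCK: clock ticks per second
	totalTicks := 500.0 + 300.0 // utime + stime in clock ticks (child times ignored)
	uptime := 1000.0            // system uptime in seconds
	starttime := 90000.0        // process start time in clock ticks since boot
	elapsed := uptime - starttime/hertz               // process lifetime: 100 seconds
	fmt.Println(100 * (totalTicks / hertz) / elapsed) // 8 (percent)
}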


@@ -1,165 +0,0 @@
package psn
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"strings"
"text/template"
"github.com/dustin/go-humanize"
"gopkg.in/yaml.v2"
)
// GetProcStatusByPID reads '/proc/$PID/status' data.
func GetProcStatusByPID(pid int64) (s Status, err error) {
return parseProcStatusByPID(pid)
}
func rawProcStatus(pid int64) (Status, error) {
fpath := fmt.Sprintf("/proc/%d/status", pid)
f, err := openToRead(fpath)
if err != nil {
return Status{}, err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return Status{}, err
}
rs := Status{}
if err := yaml.Unmarshal(b, &rs); err != nil {
return rs, err
}
return rs, nil
}
func parseProcStatusByPID(pid int64) (Status, error) {
rs, err := rawProcStatus(pid)
if err != nil {
return rs, err
}
rs.StateParsedStatus = strings.TrimSpace(rs.State)
u, _ := humanize.ParseBytes(rs.VmPeak)
rs.VmPeakBytesN = u
rs.VmPeakParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmSize)
rs.VmSizeBytesN = u
rs.VmSizeParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmLck)
rs.VmLckBytesN = u
rs.VmLckParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmPin)
rs.VmPinBytesN = u
rs.VmPinParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmHWM)
rs.VmHWMBytesN = u
rs.VmHWMParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmRSS)
rs.VmRSSBytesN = u
rs.VmRSSParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmData)
rs.VmDataBytesN = u
rs.VmDataParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmStk)
rs.VmStkBytesN = u
rs.VmStkParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmExe)
rs.VmExeBytesN = u
rs.VmExeParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmLib)
rs.VmLibBytesN = u
rs.VmLibParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmPTE)
rs.VmPTEBytesN = u
rs.VmPTEParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmPMD)
rs.VmPMDBytesN = u
rs.VmPMDParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.VmSwap)
rs.VmSwapBytesN = u
rs.VmSwapParsedBytes = humanize.Bytes(u)
u, _ = humanize.ParseBytes(rs.HugetlbPages)
rs.HugetlbPagesBytesN = u
rs.HugetlbPagesParsedBytes = humanize.Bytes(u)
return rs, nil
}
const statusTmpl = `
----------------------------------------
[/proc/{{.Pid}}/status]
Name: {{.Name}}
Umask: {{.Umask}}
State: {{.StateParsedStatus}}
Tgid: {{.Tgid}}
Ngid: {{.Ngid}}
Pid: {{.Pid}}
PPid: {{.PPid}}
TracerPid: {{.TracerPid}}
FDSize: {{.FDSize}}
VmPeak: {{.VmPeakParsedBytes}}
VmSize: {{.VmSizeParsedBytes}}
VmLck: {{.VmLckParsedBytes}}
VmPin: {{.VmPinParsedBytes}}
VmHWM: {{.VmHWMParsedBytes}}
VmRSS: {{.VmRSSParsedBytes}}
VmData: {{.VmDataParsedBytes}}
VmStk: {{.VmStkParsedBytes}}
VmExe: {{.VmExeParsedBytes}}
VmLib: {{.VmLibParsedBytes}}
VmPTE: {{.VmPTEParsedBytes}}
VmPMD: {{.VmPMDParsedBytes}}
VmSwap: {{.VmSwapParsedBytes}}
HugetlbPages: {{.HugetlbPagesParsedBytes}}
Threads: {{.Threads}}
Groups: {{.Groups}}
Uid: {{.Uid}}
Gid: {{.Gid}}
SigQ: {{.SigQ}}
SigPnd: {{.SigPnd}}
ShdPnd: {{.ShdPnd}}
SigBlk: {{.SigBlk}}
SigIgn: {{.SigIgn}}
SigCgt: {{.SigCgt}}
CapInh: {{.CapInh}}
CapPrm: {{.CapPrm}}
CapEff: {{.CapEff}}
CapBnd: {{.CapBnd}}
Seccomp: {{.Seccomp}}
Cpus_allowed: {{.CpusAllowed}}
Cpus_allowed_list: {{.CpusAllowedList}}
Mems_allowed: {{.MemsAllowed}}
Mems_allowed_list: {{.MemsAllowedList}}
voluntary_ctxt_switches: {{.VoluntaryCtxtSwitches}}
nonvoluntary_ctxt_switches: {{.NonvoluntaryCtxtSwitches}}
----------------------------------------
`
func (s Status) String() string {
tpl := template.Must(template.New("statusTmpl").Parse(statusTmpl))
buf := new(bytes.Buffer)
if err := tpl.Execute(buf, s); err != nil {
log.Fatal(err)
}
return buf.String()
}


@@ -1,398 +0,0 @@
// Package schema defines proc schema.
package schema
import "reflect"
// RawDataType defines how the raw data bytes are defined.
type RawDataType int
const (
TypeBytes RawDataType = iota
TypeInt64
TypeFloat64
TypeTimeMicroseconds
TypeTimeSeconds
TypeIPAddress
TypeStatus
)
// RawData defines 'proc' raw data.
type RawData struct {
// IsYAML is true if raw data is parsable in YAML.
IsYAML bool
Columns []Column
ColumnsToParse map[string]RawDataType
}
// Column represents the schema column.
type Column struct {
Name string
Godoc string
Kind reflect.Kind
}
// NetDev represents '/proc/net/dev'
// (See http://man7.org/linux/man-pages/man5/proc.5.html
// or http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html).
var NetDev = RawData{
IsYAML: false,
Columns: []Column{
{"interface", "network interface", reflect.String},
{"receive_bytes", "total number of bytes of data received by the interface", reflect.Uint64},
{"receive_packets", "total number of packets of data received by the interface", reflect.Uint64},
{"receive_errs", "total number of receive errors detected by the device driver", reflect.Uint64},
{"receive_drop", "total number of packets dropped by the device driver", reflect.Uint64},
{"receive_fifo", "number of FIFO buffer errors", reflect.Uint64},
{"receive_frame", "number of packet framing errors", reflect.Uint64},
{"receive_compressed", "number of compressed packets received by the device driver", reflect.Uint64},
{"receive_multicast", "number of multicast frames received by the device driver", reflect.Uint64},
{"transmit_bytes", "total number of bytes of data transmitted by the interface", reflect.Uint64},
{"transmit_packets", "total number of packets of data transmitted by the interface", reflect.Uint64},
{"transmit_errs", "total number of receive errors detected by the device driver", reflect.Uint64},
{"transmit_drop", "total number of packets dropped by the device driver", reflect.Uint64},
{"transmit_fifo", "number of FIFO buffer errors", reflect.Uint64},
{"transmit_colls", "number of collisions detected on the interface", reflect.Uint64},
{"transmit_carrier", "number of carrier losses detected by the device driver", reflect.Uint64},
},
ColumnsToParse: map[string]RawDataType{
"receive_bytes": TypeBytes,
"transmit_bytes": TypeBytes,
},
}
// NetTCP represents '/proc/net/tcp' and '/proc/net/tcp6'
// (See http://man7.org/linux/man-pages/man5/proc.5.html
// and http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html).
var NetTCP = RawData{
IsYAML: false,
Columns: []Column{
{"sl", "kernel hash slot", reflect.Uint64},
{"local_address", "local-address:port", reflect.String},
{"rem_address", "remote-address:port", reflect.String},
{"st", "internal status of socket", reflect.String},
{"tx_queue", "outgoing data queue in terms of kernel memory usage", reflect.String},
{"rx_queue", "incoming data queue in terms of kernel memory usage", reflect.String},
{"tr", "internal information of the kernel socket state", reflect.String},
{"tm->when", "internal information of the kernel socket state", reflect.String},
{"retrnsmt", "internal information of the kernel socket state", reflect.String},
{"uid", "effective UID of the creator of the socket", reflect.Uint64},
{"timeout", "timeout", reflect.Uint64},
{"inode", "inode raw data", reflect.String},
},
ColumnsToParse: map[string]RawDataType{
"local_address": TypeIPAddress,
"rem_address": TypeIPAddress,
"st": TypeStatus,
},
}
// TopCommandRow represents a row in 'top' command output.
// (See http://man7.org/linux/man-pages/man1/top.1.html).
var TopCommandRow = RawData{
IsYAML: false,
Columns: []Column{
{"PID", "pid of the process", reflect.Int64},
{"USER", "user name", reflect.String},
{"PR", "priority", reflect.String},
{"NI", "nice value of the task", reflect.String},
{"VIRT", "total amount of virtual memory used by the task (in KiB)", reflect.String},
{"RES", "non-swapped physical memory a task is using (in KiB)", reflect.String},
{"SHR", "amount of shared memory available to a task, not all of which is typically resident (in KiB)", reflect.String},
{"S", "process status", reflect.String},
{"CPUPercent", "%CPU", reflect.Float64},
{"MEMPercent", "%MEM", reflect.Float64},
{"TIME", "CPU time (TIME+)", reflect.String},
{"COMMAND", "command", reflect.String},
},
ColumnsToParse: map[string]RawDataType{
"S": TypeStatus,
"VIRT": TypeBytes,
"RES": TypeBytes,
"SHR": TypeBytes,
},
}
// LoadAvg represents '/proc/loadavg'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var LoadAvg = RawData{
IsYAML: false,
Columns: []Column{
{"load-avg-1-minute", "total uptime in seconds", reflect.Float64},
{"load-avg-5-minute", "total uptime in seconds", reflect.Float64},
{"load-avg-15-minute", "total uptime in seconds", reflect.Float64},
{"runnable-kernel-scheduling-entities", "number of currently runnable kernel scheduling entities (processes, threads)", reflect.Int64},
{"current-kernel-scheduling-entities", "number of kernel scheduling entities that currently exist on the system", reflect.Int64},
{"pid", "PID of the process that was most recently created on the system", reflect.Int64},
},
ColumnsToParse: map[string]RawDataType{},
}
// Uptime represents '/proc/uptime'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var Uptime = RawData{
IsYAML: false,
Columns: []Column{
{"uptime-total", "total uptime in seconds", reflect.Float64},
{"uptime-idle", "total amount of time in seconds spent in idle process", reflect.Float64},
},
ColumnsToParse: map[string]RawDataType{
"uptime-total": TypeTimeSeconds,
"uptime-idle": TypeTimeSeconds,
},
}
// DfCommandRow represents 'df' command output row
// (See https://en.wikipedia.org/wiki/Df_(Unix)
// and https://www.gnu.org/software/coreutils/manual/html_node/df-invocation.html
// and 'df --all --sync --block-size=1024 --output=source,target,fstype,file,itotal,iavail,iused,ipcent,size,avail,used,pcent'
// and the output unit is kilobytes).
var DfCommandRow = RawData{
IsYAML: false,
Columns: []Column{
{"file-system", "file system ('source')", reflect.String},
{"device", "device name", reflect.String},
{"mounted-on", "'mounted on' ('target')", reflect.String},
{"file-system-type", "file system type ('fstype')", reflect.String},
{"file", "file name if specified on the command line ('file')", reflect.String},
{"inodes", "total number of inodes ('itotal')", reflect.Int64},
{"ifree", "number of available inodes ('iavail')", reflect.Int64},
{"iused", "number of used inodes ('iused')", reflect.Int64},
{"iused-percent", "percentage of iused divided by itotal ('ipcent')", reflect.String},
{"total-blocks", "total number of 1K-blocks ('size')", reflect.Int64},
{"available-blocks", "number of available 1K-blocks ('avail')", reflect.Int64},
{"used-blocks", "number of used 1K-blocks ('used')", reflect.Int64},
{"used-blocks-percent", "percentage of used-blocks divided by total-blocks ('pcent')", reflect.String},
},
ColumnsToParse: map[string]RawDataType{
"total-blocks": TypeBytes,
"available-blocks": TypeBytes,
"used-blocks": TypeBytes,
},
}
// Mtab represents '/etc/mtab'
// (See https://en.wikipedia.org/wiki/Fstab
// and https://en.wikipedia.org/wiki/Mtab).
var Mtab = RawData{
IsYAML: false,
Columns: []Column{
{"file-system", "file system", reflect.String},
{"mounted-on", "'mounted on'", reflect.String},
{"file-system-type", "file system type", reflect.String},
{"options", "file system type", reflect.String},
{"dump", "number indicating whether and how often the file system should be backed up by the dump program; a zero indicates the file system will never be automatically backed up", reflect.Int},
{"pass", "number indicating the order in which the fsck program will check the devices for errors at boot time; this is 1 for the root file system and either 2 (meaning check after root) or 0 (do not check) for all other devices", reflect.Int},
},
ColumnsToParse: map[string]RawDataType{},
}
// DiskStat represents '/proc/diskstats'
// (See https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
// and https://www.kernel.org/doc/Documentation/iostats.txt).
var DiskStat = RawData{
IsYAML: false,
Columns: []Column{
{"major-number", "major device number", reflect.Uint64},
{"minor-number", "minor device number", reflect.Uint64},
{"device-name", "device name", reflect.String},
{"reads-completed", "total number of reads completed successfully", reflect.Uint64},
{"reads-merged", "total number of reads merged when adjacent to each other", reflect.Uint64},
{"sectors-read", "total number of sectors read successfully", reflect.Uint64},
{"time-spent-on-reading-ms", "total number of milliseconds spent by all reads", reflect.Uint64},
{"writes-completed", "total number of writes completed successfully", reflect.Uint64},
{"writes-merged", "total number of writes merged when adjacent to each other", reflect.Uint64},
{"sectors-written", "total number of sectors written successfully", reflect.Uint64},
{"time-spent-on-writing-ms", "total number of milliseconds spent by all writes", reflect.Uint64},
{"I/Os-in-progress", "only field that should go to zero (incremented as requests are on request_queue)", reflect.Uint64},
{"time-spent-on-I/Os-ms", "milliseconds spent doing I/Os", reflect.Uint64},
{"weighted-time-spent-on-I/Os-ms", "weighted milliseconds spent doing I/Os (incremented at each I/O start, I/O completion, I/O merge)", reflect.Uint64},
},
ColumnsToParse: map[string]RawDataType{
"time-spent-on-reading-ms": TypeTimeMicroseconds,
"time-spent-on-writing-ms": TypeTimeMicroseconds,
"time-spent-on-I/Os-ms": TypeTimeMicroseconds,
"weighted-time-spent-on-I/Os-ms": TypeTimeMicroseconds,
},
}
// IO represents 'proc/$PID/io'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var IO = RawData{
IsYAML: true,
Columns: []Column{
{"rchar", "number of bytes which this task has caused to be read from storage (sum of bytes which this process passed to read)", reflect.Uint64},
{"wchar", "number of bytes which this task has caused, or shall cause to be written to disk", reflect.Uint64},
{"syscr", "number of read I/O operations", reflect.Uint64},
{"syscw", "number of write I/O operations", reflect.Uint64},
{"read_bytes", "number of bytes which this process really did cause to be fetched from the storage layer", reflect.Uint64},
{"write_bytes", "number of bytes which this process caused to be sent to the storage layer", reflect.Uint64},
{"cancelled_write_bytes", "number of bytes which this process caused to not happen by truncating pagecache", reflect.Uint64},
},
ColumnsToParse: map[string]RawDataType{
"rchar": TypeBytes,
"wchar": TypeBytes,
"read_bytes": TypeBytes,
"write_bytes": TypeBytes,
"cancelled_write_bytes": TypeBytes,
},
}
// Stat represents '/proc/$PID/stat'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var Stat = RawData{
IsYAML: false,
Columns: []Column{
{"pid", "process ID", reflect.Int64},
{"comm", "filename of the executable (originally in parentheses, automatically removed by this package)", reflect.String},
{"state", "one character that represents the state of the process", reflect.String},
{"ppid", "PID of the parent process", reflect.Int64},
{"pgrp", "group ID of the process", reflect.Int64},
{"session", "session ID of the process", reflect.Int64},
{"tty_nr", "controlling terminal of the process", reflect.Int64},
{"tpgid", "ID of the foreground process group of the controlling terminal of the process", reflect.Int64},
{"flags", "kernel flags word of the process", reflect.Int64},
{"minflt", "number of minor faults the process has made which have not required loading a memory page from disk", reflect.Uint64},
{"cminflt", "number of minor faults that the process's waited-for children have made", reflect.Uint64},
{"majflt", "number of major faults the process has made which have required loading a memory page from disk", reflect.Uint64},
{"cmajflt", "number of major faults that the process's waited-for children have made", reflect.Uint64},
{"utime", "number of clock ticks that this process has been scheduled in user mode (includes guest_time)", reflect.Uint64},
{"stime", "number of clock ticks that this process has been scheduled in kernel mode", reflect.Uint64},
{"cutime", "number of clock ticks that this process's waited-for children have been scheduled in user mode", reflect.Uint64},
{"cstime", "number of clock ticks that this process's waited-for children have been scheduled in kernel mode", reflect.Uint64},
{"priority", "for processes running a real-time scheduling policy, the negated scheduling priority, minus one; that is, a number in the range -2 to -100, corresponding to real-time priorities 1 to 99. For processes running under a non-real-time scheduling policy, this is the raw nice value. The kernel stores nice values as numbers in the range 0 (high) to 39 (low)", reflect.Int64},
{"nice", "nice value, a value in the range 19 (low priority) to -20 (high priority)", reflect.Int64},
{"num_threads", "number of threads in this process", reflect.Int64},
{"itrealvalue", "no longer maintained", reflect.Int64},
{"starttime", "time(number of clock ticks) the process started after system boot", reflect.Uint64},
{"vsize", "virtual memory size in bytes", reflect.Uint64},
{"rss", "resident set size: number of pages the process has in real memory (text, data, or stack space but does not include pages which have not been demand-loaded in, or which are swapped out)", reflect.Int64},
{"rsslim", "current soft limit in bytes on the rss of the process", reflect.Uint64},
{"startcode", "address above which program text can run", reflect.Uint64},
{"endcode", "address below which program text can run", reflect.Uint64},
{"startstack", "address of the start (i.e., bottom) of the stack", reflect.Uint64},
{"kstkesp", "current value of ESP (stack pointer), as found in the kernel stack page for the process", reflect.Uint64},
{"kstkeip", "current EIP (instruction pointer)", reflect.Uint64},
{"signal", "obsolete, because it does not provide information on real-time signals (use /proc/$PID/status)", reflect.Uint64},
{"blocked", "obsolete, because it does not provide information on real-time signals (use /proc/$PID/status)", reflect.Uint64},
{"sigignore", "obsolete, because it does not provide information on real-time signals (use /proc/$PID/status)", reflect.Uint64},
{"sigcatch", "obsolete, because it does not provide information on real-time signals (use /proc/$PID/status)", reflect.Uint64},
{"wchan", "channel in which the process is waiting (address of a location in the kernel where the process is sleeping)", reflect.Uint64},
{"nswap", "not maintained (number of pages swapped)", reflect.Uint64},
{"cnswap", "not maintained (cumulative nswap for child processes)", reflect.Uint64},
{"exit_signal", "signal to be sent to parent when we die", reflect.Int64},
{"processor", "CPU number last executed on", reflect.Int64},
{"rt_priority", "real-time scheduling priority, a number in the range 1 to 99 for processes scheduled under a real-time policy, or 0, for non-real-time processes", reflect.Uint64},
{"policy", "scheduling policy", reflect.Uint64},
{"delayacct_blkio_ticks", "aggregated block I/O delays, measured in clock ticks", reflect.Uint64},
{"guest_time", "number of clock ticks spent running a virtual CPU for a guest operating system", reflect.Uint64},
{"cguest_time", "number of clock ticks (guest_time of the process's children)", reflect.Uint64},
{"start_data", "address above which program initialized and uninitialized (BSS) data are placed", reflect.Uint64},
{"end_data", "address below which program initialized and uninitialized (BSS) data are placed", reflect.Uint64},
{"start_brk", "address above which program heap can be expanded with brk", reflect.Uint64},
{"arg_start", "address above which program command-line arguments are placed", reflect.Uint64},
{"arg_end", "address below program command-line arguments are placed", reflect.Uint64},
{"env_start", "address above which program environment is placed", reflect.Uint64},
{"env_end", "address below which program environment is placed", reflect.Uint64},
{"exit_code", "thread's exit status in the form reported by waitpid(2)", reflect.Int64},
},
ColumnsToParse: map[string]RawDataType{
"state": TypeStatus,
"vsize": TypeBytes,
"rss": TypeBytes,
"rsslim": TypeBytes,
},
}
// Status represents 'proc/$PID/status'
// (See http://man7.org/linux/man-pages/man5/proc.5.html).
var Status = RawData{
IsYAML: true,
Columns: []Column{
{"Name", "command run by this process", reflect.String},
{"Umask", "process umask, expressed in octal with a leading", reflect.String},
{"State", "current state of the process: R (running), S (sleeping), D (disk sleep), T (stopped), T (tracing stop), Z (zombie), or X (dead)", reflect.String},
{"Tgid", "thread group ID", reflect.Int64},
{"Ngid", "NUMA group ID", reflect.Int64},
{"Pid", "process ID", reflect.Int64},
{"PPid", "parent process ID, which launches the Pid", reflect.Int64},
{"TracerPid", "PID of process tracing this process (0 if not being traced)", reflect.Int64},
{"Uid", "real, effective, saved set, and filesystem UIDs", reflect.String},
{"Gid", "real, effective, saved set, and filesystem UIDs", reflect.String},
{"FDSize", "number of file descriptor slots currently allocated", reflect.Uint64},
{"Groups", "supplementary group list", reflect.String},
{"NStgid", "thread group ID (i.e., PID) in each of the PID namespaces of which [pid] is a member", reflect.String},
{"NSpid", "thread ID (i.e., PID) in each of the PID namespaces of which [pid] is a member", reflect.String},
{"NSpgid", "process group ID (i.e., PID) in each of the PID namespaces of which [pid] is a member", reflect.String},
{"NSsid", "descendant namespace session ID hierarchy Session ID in each of the PID namespaces of which [pid] is a member", reflect.String},
{"VmPeak", "peak virtual memory usage. Vm includes physical memory and swap", reflect.String},
{"VmSize", "current virtual memory usage. VmSize is the total amount of memory required for this process", reflect.String},
{"VmLck", "locked memory size", reflect.String},
{"VmPin", "pinned memory size (pages can't be moved, requires direct-access to physical memory)", reflect.String},
{"VmHWM", `peak resident set size ("high water mark")`, reflect.String},
{"VmRSS", "resident set size. VmRSS is the actual amount in memory. Some memory can be swapped out to physical disk. So this is the real memory usage of the process", reflect.String},
{"VmData", "size of data segment", reflect.String},
{"VmStk", "size of stack", reflect.String},
{"VmExe", "size of text segments", reflect.String},
{"VmLib", "shared library code size", reflect.String},
{"VmPTE", "page table entries size", reflect.String},
{"VmPMD", "size of second-level page tables", reflect.String},
{"VmSwap", "swapped-out virtual memory size by anonymous private", reflect.String},
{"HugetlbPages", "size of hugetlb memory portions", reflect.String},
{"Threads", "number of threads in process containing this thread (process)", reflect.Uint64},
{"SigQ", "queued signals for the real user ID of this process (queued signals / limits)", reflect.String},
{"SigPnd", "number of signals pending for thread", reflect.String},
{"ShdPnd", "number of signals pending for process as a whole", reflect.String},
{"SigBlk", "masks indicating signals being blocked", reflect.String},
{"SigIgn", "masks indicating signals being ignored", reflect.String},
{"SigCgt", "masks indicating signals being caught", reflect.String},
{"CapInh", "masks of capabilities enabled in inheritable sets", reflect.String},
{"CapPrm", "masks of capabilities enabled in permitted sets", reflect.String},
{"CapEff", "masks of capabilities enabled in effective sets", reflect.String},
{"CapBnd", "capability Bounding set", reflect.String},
{"CapAmb", "ambient capability set", reflect.String},
{"Seccomp", "seccomp mode of the process (0 means SECCOMP_MODE_DISABLED; 1 means SECCOMP_MODE_STRICT; 2 means SECCOMP_MODE_FILTER)", reflect.Uint64},
{"Cpus_allowed", "mask of CPUs on which this process may run", reflect.String},
{"Cpus_allowed_list", "list of CPUs on which this process may run", reflect.String},
{"Mems_allowed", "mask of memory nodes allowed to this process", reflect.String},
{"Mems_allowed_list", "list of memory nodes allowed to this process", reflect.String},
{"voluntary_ctxt_switches", "number of voluntary context switches", reflect.Uint64},
{"nonvoluntary_ctxt_switches", "number of involuntary context switches", reflect.Uint64},
},
ColumnsToParse: map[string]RawDataType{
"State": TypeStatus,
"VmPeak": TypeBytes,
"VmSize": TypeBytes,
"VmLck": TypeBytes,
"VmPin": TypeBytes,
"VmHWM": TypeBytes,
"VmRSS": TypeBytes,
"VmData": TypeBytes,
"VmStk": TypeBytes,
"VmExe": TypeBytes,
"VmLib": TypeBytes,
"VmPTE": TypeBytes,
"VmPMD": TypeBytes,
"VmSwap": TypeBytes,
"HugetlbPages": TypeBytes,
},
}


@@ -1,48 +0,0 @@
package schema
import (
"fmt"
"reflect"
"strings"
)
// ToField converts raw YAML key to Go field name.
func ToField(s string) string {
s = strings.Replace(s, "-", "_", -1)
s = strings.Replace(s, "/", "", -1)
s = strings.Replace(s, ">", "", -1)
cs := strings.Split(s, "_")
var ss []string
for _, v := range cs {
if len(v) > 0 {
ss = append(ss, strings.Title(v))
}
}
return strings.TrimSpace(strings.Join(ss, ""))
}
// ToFieldTag converts raw key to field name.
func ToFieldTag(s string) string {
s = strings.ToLower(s)
s = strings.Replace(s, "-", "_", -1)
s = strings.Replace(s, "/", "", -1)
return strings.Replace(s, ">", "", -1)
}
// GoType converts to Go type.
func GoType(tp reflect.Kind) string {
switch tp {
case reflect.Float64:
return "float64"
case reflect.Uint64:
return "uint64"
case reflect.Int:
return "int"
case reflect.Int64:
return "int64"
case reflect.String:
return "string"
default:
panic(fmt.Errorf("unknown type %q", tp.String()))
}
}


@@ -1,99 +0,0 @@
package psn
import (
"os"
"runtime"
"strconv"
"strings"
"time"
humanize "github.com/dustin/go-humanize"
)
func isInt(s string) bool {
_, err := strconv.Atoi(s)
return err == nil
}
func humanizeDurationMs(ms uint64) string {
s := humanize.Time(time.Now().Add(-1 * time.Duration(ms) * time.Millisecond))
if s == "now" {
s = "0 seconds"
}
return strings.TrimSpace(strings.Replace(s, " ago", "", -1))
}
func humanizeDurationSecond(sec uint64) string {
s := humanize.Time(time.Now().Add(-1 * time.Duration(sec) * time.Second))
if s == "now" {
s = "0 seconds"
}
return strings.TrimSpace(strings.Replace(s, " ago", "", -1))
}
func openToRead(fpath string) (*os.File, error) {
f, err := os.OpenFile(fpath, os.O_RDONLY, 0444)
if err != nil {
return nil, err
}
return f, nil
}
func openToAppend(fpath string) (*os.File, error) {
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0777)
if err != nil {
return nil, err
}
return f, nil
}
func openToOverwrite(fpath string) (*os.File, error) {
f, err := os.OpenFile(fpath, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0777)
if err != nil {
return nil, err
}
return f, nil
}
func toFile(data []byte, fpath string) error {
f, err := openToOverwrite(fpath)
if err != nil {
f, err = os.Create(fpath)
if err != nil {
return err
}
}
_, err = f.Write(data)
f.Close()
return err
}
func homeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
return os.Getenv("HOME")
}
// exist returns true if the file or directory exists.
func exist(fpath string) bool {
st, err := os.Stat(fpath)
if err != nil {
if os.IsNotExist(err) {
return false
}
}
if st.IsDir() {
return true
}
if _, err := os.Stat(fpath); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}


@@ -1,80 +0,0 @@
package psn
import (
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
func convertProcStatus(s string) string {
ns := strings.TrimSpace(s)
if len(s) > 1 {
ns = ns[:1]
}
switch ns {
case "D":
return "D (uninterruptible sleep)"
case "R":
return "R (running)"
case "S":
return "S (sleeping)"
case "T":
return "T (stopped by job control signal)"
case "t":
return "t (stopped by debugger during trace)"
case "Z":
return "Z (zombie)"
default:
return fmt.Sprintf("unknown process %q", s)
}
}
func pidFromFd(s string) (int64, error) {
// get 5261 from '/proc/5261/fd/69'
return strconv.ParseInt(strings.Split(s, "/")[2], 10, 64)
}
// ListPIDs reads all PIDs in '/proc'.
func ListPIDs() ([]int64, error) {
ds, err := ioutil.ReadDir("/proc")
if err != nil {
return nil, err
}
pids := make([]int64, 0, len(ds))
for _, f := range ds {
if f.IsDir() && isInt(f.Name()) {
id, err := strconv.ParseInt(f.Name(), 10, 64)
if err != nil {
return nil, err
}
pids = append(pids, id)
}
}
return pids, nil
}
// ListProcFds reads '/proc/*/fd/*' to grab process IDs.
func ListProcFds() ([]string, error) {
// returns the names of all files matching pattern
// or nil if there is no matching file
fs, err := filepath.Glob("/proc/[0-9]*/fd/[0-9]*")
if err != nil {
return nil, err
}
return fs, nil
}
// GetProgram returns the program name.
func GetProgram(pid int64) (string, error) {
// Readlink needs root permission
// return os.Readlink(fmt.Sprintf("/proc/%d/exe", pid))
rs, err := rawProcStatus(pid)
if err != nil {
return "", err
}
return rs.Name, nil
}

161
vendor/github.com/gyuho/linux-inspect/schema/schema.go generated vendored Normal file

@@ -0,0 +1,161 @@
// Package schema represents linux-inspect schema.
package schema
import (
"bytes"
"fmt"
"reflect"
"strings"
)
// Generate generates go struct text from given RawData.
func Generate(raw RawData) string {
tagstr := "yaml"
if !raw.IsYAML {
tagstr = "column"
}
buf := new(bytes.Buffer)
for _, col := range raw.Columns {
goFieldName := ToField(col.Name)
goFieldTagName := col.Name
if !raw.IsYAML {
goFieldTagName = ToFieldTag(goFieldTagName)
}
if col.Godoc != "" {
buf.WriteString(fmt.Sprintf("\t// %s is %s.\n", goFieldName, col.Godoc))
}
buf.WriteString(fmt.Sprintf("\t%s\t%s\t`%s:\"%s\"`\n",
goFieldName,
GoType(col.Kind),
tagstr,
goFieldTagName,
))
// additional parsed column
if v, ok := raw.ColumnsToParse[col.Name]; ok {
switch v {
case TypeInt64, TypeFloat64:
// need no additional columns
case TypeBytes:
ntstr := "uint64"
if col.Kind == reflect.Int64 {
ntstr = "int64"
}
buf.WriteString(fmt.Sprintf("\t%sBytesN\t%s\t`%s:\"%s_bytes_n\"`\n",
goFieldName,
ntstr,
tagstr,
goFieldTagName,
))
buf.WriteString(fmt.Sprintf("\t%sParsedBytes\tstring\t`%s:\"%s_parsed_bytes\"`\n",
goFieldName,
tagstr,
goFieldTagName,
))
case TypeTimeMicroseconds, TypeTimeSeconds:
buf.WriteString(fmt.Sprintf("\t%sParsedTime\tstring\t`%s:\"%s_parsed_time\"`\n",
goFieldName,
tagstr,
goFieldTagName,
))
case TypeIPAddress:
buf.WriteString(fmt.Sprintf("\t%sParsedIPHost\tstring\t`%s:\"%s_parsed_ip_host\"`\n",
goFieldName,
tagstr,
goFieldTagName,
))
buf.WriteString(fmt.Sprintf("\t%sParsedIPPort\tint64\t`%s:\"%s_parsed_ip_port\"`\n",
goFieldName,
tagstr,
goFieldTagName,
))
case TypeStatus:
buf.WriteString(fmt.Sprintf("\t%sParsedStatus\tstring\t`%s:\"%s_parsed_status\"`\n",
goFieldName,
tagstr,
goFieldTagName,
))
default:
panic(fmt.Errorf("unknown parse type %d", raw.ColumnsToParse[col.Name]))
}
}
}
return buf.String()
}
// RawDataType defines how the raw data bytes are defined.
type RawDataType int
const (
TypeBytes RawDataType = iota
TypeInt64
TypeFloat64
TypeTimeMicroseconds
TypeTimeSeconds
TypeIPAddress
TypeStatus
)
// RawData defines 'proc' raw data.
type RawData struct {
// IsYAML is true if raw data is parsable in YAML.
IsYAML bool
Columns []Column
ColumnsToParse map[string]RawDataType
}
// Column represents the schema column.
type Column struct {
Name string
Godoc string
Kind reflect.Kind
}
// ToField converts raw YAML key to Go field name.
func ToField(s string) string {
s = strings.Replace(s, "-", "_", -1)
s = strings.Replace(s, "/", "", -1)
s = strings.Replace(s, ">", "", -1)
cs := strings.Split(s, "_")
var ss []string
for _, v := range cs {
if len(v) > 0 {
ss = append(ss, strings.Title(v))
}
}
return strings.TrimSpace(strings.Join(ss, ""))
}
// ToFieldTag converts raw key to field name.
func ToFieldTag(s string) string {
s = strings.ToLower(s)
s = strings.Replace(s, "-", "_", -1)
s = strings.Replace(s, "/", "", -1)
return strings.Replace(s, ">", "", -1)
}
// GoType converts to Go type.
func GoType(tp reflect.Kind) string {
switch tp {
case reflect.Float64:
return "float64"
case reflect.Uint64:
return "uint64"
case reflect.Int:
return "int"
case reflect.Int64:
return "int64"
case reflect.String:
return "string"
default:
panic(fmt.Errorf("unknown type %q", tp.String()))
}
}
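As a rough sketch of how Generate is meant to be used (the actual generator entry point is not part of this diff), feeding it the RowSchema defined later in this commit under top/schema.go would print the field list that appears in top/generated.go:

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/schema"
	"github.com/gyuho/linux-inspect/top"
)

func main() {
	// Emits lines such as the PID/USER/... fields with `column:"..."` tags,
	// i.e. the body of the Row struct in top/generated.go.
	fmt.Print(schema.Generate(top.RowSchema))
}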

2
vendor/github.com/gyuho/linux-inspect/top/doc.go generated vendored Normal file

@@ -0,0 +1,2 @@
// Package top wraps Linux 'top' command.
package top

38
vendor/github.com/gyuho/linux-inspect/top/generated.go generated vendored Normal file

@@ -0,0 +1,38 @@
package top
// updated at 2017-05-09 15:51:40.794906479 -0700 PDT
// Row represents a row in 'top' command output.
type Row struct {
// PID is pid of the process.
PID int64 `column:"pid"`
// USER is user name.
USER string `column:"user"`
// PR is priority.
PR string `column:"pr"`
// NI is nice value of the task.
NI string `column:"ni"`
// VIRT is total amount of virtual memory used by the task (in KiB).
VIRT string `column:"virt"`
VIRTBytesN uint64 `column:"virt_bytes_n"`
VIRTParsedBytes string `column:"virt_parsed_bytes"`
// RES is non-swapped physical memory a task is using (in KiB).
RES string `column:"res"`
RESBytesN uint64 `column:"res_bytes_n"`
RESParsedBytes string `column:"res_parsed_bytes"`
// SHR is amount of shared memory available to a task, not all of which is typically resident (in KiB).
SHR string `column:"shr"`
SHRBytesN uint64 `column:"shr_bytes_n"`
SHRParsedBytes string `column:"shr_parsed_bytes"`
// S is process status.
S string `column:"s"`
SParsedStatus string `column:"s_parsed_status"`
// CPUPercent is %CPU.
CPUPercent float64 `column:"cpupercent"`
// MEMPercent is %MEM.
MEMPercent float64 `column:"mempercent"`
// TIME is CPU time (TIME+).
TIME string `column:"time"`
// COMMAND is command.
COMMAND string `column:"command"`
}


@@ -1,4 +1,4 @@
-package psn
+package top
import (
"bytes"
@@ -18,7 +18,7 @@ import (
// PiB = pebibyte = 1024 TiB = 1,125,899,906,842,624 bytes
// EiB = exbibyte = 1024 PiB = 1,152,921,504,606,846,976 bytes
//
-func parseTopCommandKiB(s string) (bts uint64, hs string, err error) {
+func parseKiB(s string) (bts uint64, hs string, err error) {
s = strings.TrimSpace(s)
switch {
// suffix 'm' means megabytes
@@ -54,8 +54,8 @@ func parseTopCommandKiB(s string) (bts uint64, hs string, err error) {
return
}
-// TopRowHeaders is the headers in 'top' output.
+// Headers is the headers in 'top' output.
-var TopRowHeaders = []string{
+var Headers = []string{
"PID",
"USER",
"PR",
@@ -70,21 +70,21 @@ var TopRowHeaders = []string{
"COMMAND",
}
-type topCommandOutputRowIdx int
+type commandOutputRowIdx int
const (
-top_command_output_row_idx_pid topCommandOutputRowIdx = iota
+command_output_row_idx_pid commandOutputRowIdx = iota
-top_command_output_row_idx_user
+command_output_row_idx_user
-top_command_output_row_idx_pr
+command_output_row_idx_pr
-top_command_output_row_idx_ni
+command_output_row_idx_ni
-top_command_output_row_idx_virt
+command_output_row_idx_virt
-top_command_output_row_idx_res
+command_output_row_idx_res
-top_command_output_row_idx_shr
+command_output_row_idx_shr
-top_command_output_row_idx_s
+command_output_row_idx_s
-top_command_output_row_idx_cpu
+command_output_row_idx_cpu
-top_command_output_row_idx_mem
+command_output_row_idx_mem
-top_command_output_row_idx_time
+command_output_row_idx_time
-top_command_output_row_idx_command
+command_output_row_idx_command
)
var bytesToSkip = [][]byte{
@@ -108,8 +108,8 @@ func topRowToSkip(data []byte) bool {
return false
}
-// ParseTopOutput parses 'top' command output and returns the rows.
+// Parse parses 'top' command output and returns the rows.
-func ParseTopOutput(s string) ([]TopCommandRow, error) {
+func Parse(s string) ([]Row, error) {
lines := strings.Split(s, "\n")
rows := make([][]string, 0, len(lines))
for _, line := range lines {
@@ -122,25 +122,25 @@ func ParseTopOutput(s string) ([]TopCommandRow, error) {
}
row := strings.Fields(strings.TrimSpace(line))
-if len(row) != len(TopRowHeaders) {
+if len(row) != len(Headers) {
-return nil, fmt.Errorf("unexpected row column number %v (expected %v)", row, TopRowHeaders)
+return nil, fmt.Errorf("unexpected row column number %v (expected %v)", row, Headers)
}
rows = append(rows, row)
}
type result struct {
-row TopCommandRow
+row Row
err error
}
rc := make(chan result, len(rows))
for _, row := range rows {
go func(row []string) {
-tr, err := parseTopRow(row)
+tr, err := parseRow(row)
rc <- result{row: tr, err: err}
}(row)
}
-tcRows := make([]TopCommandRow, 0, len(rows))
+tcRows := make([]Row, 0, len(rows))
for len(tcRows) != len(rows) {
select {
case rs := <-rc:
@@ -153,60 +153,83 @@ func ParseTopOutput(s string) ([]TopCommandRow, error) {
return tcRows, nil
}
-func parseTopRow(row []string) (TopCommandRow, error) {
+func parseRow(row []string) (Row, error) {
-trow := TopCommandRow{
+trow := Row{
-USER: strings.TrimSpace(row[top_command_output_row_idx_user]),
+USER: strings.TrimSpace(row[command_output_row_idx_user]),
}
-pv, err := strconv.ParseInt(row[top_command_output_row_idx_pid], 10, 64)
+pv, err := strconv.ParseInt(row[command_output_row_idx_pid], 10, 64)
if err != nil {
-return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
+return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.PID = pv
-trow.PR = strings.TrimSpace(row[top_command_output_row_idx_pr])
+trow.PR = strings.TrimSpace(row[command_output_row_idx_pr])
-trow.NI = strings.TrimSpace(row[top_command_output_row_idx_ni])
+trow.NI = strings.TrimSpace(row[command_output_row_idx_ni])
-virt, virtTxt, err := parseTopCommandKiB(row[top_command_output_row_idx_virt])
+virt, virtTxt, err := parseKiB(row[command_output_row_idx_virt])
if err != nil {
-return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
+return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
-trow.VIRT = row[top_command_output_row_idx_virt]
+trow.VIRT = row[command_output_row_idx_virt]
trow.VIRTBytesN = virt
trow.VIRTParsedBytes = virtTxt
-res, resTxt, err := parseTopCommandKiB(row[top_command_output_row_idx_res])
+res, resTxt, err := parseKiB(row[command_output_row_idx_res])
if err != nil {
-return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
+return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
-trow.RES = row[top_command_output_row_idx_res]
+trow.RES = row[command_output_row_idx_res]
trow.RESBytesN = res
trow.RESParsedBytes = resTxt
-shr, shrTxt, err := parseTopCommandKiB(row[top_command_output_row_idx_shr])
+shr, shrTxt, err := parseKiB(row[command_output_row_idx_shr])
if err != nil {
-return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
+return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
-trow.SHR = row[top_command_output_row_idx_shr]
+trow.SHR = row[command_output_row_idx_shr]
trow.SHRBytesN = shr
trow.SHRParsedBytes = shrTxt
-trow.S = row[top_command_output_row_idx_s]
+trow.S = row[command_output_row_idx_s]
-trow.SParsedStatus = convertProcStatus(row[top_command_output_row_idx_s])
+trow.SParsedStatus = parseStatus(row[command_output_row_idx_s])
-cnum, err := strconv.ParseFloat(row[top_command_output_row_idx_cpu], 64)
+cnum, err := strconv.ParseFloat(row[command_output_row_idx_cpu], 64)
if err != nil {
-return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
+return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.CPUPercent = cnum
-mnum, err := strconv.ParseFloat(row[top_command_output_row_idx_mem], 64)
+mnum, err := strconv.ParseFloat(row[command_output_row_idx_mem], 64)
if err != nil {
-return TopCommandRow{}, fmt.Errorf("parse error %v (row %v)", err, row)
+return Row{}, fmt.Errorf("parse error %v (row %v)", err, row)
}
trow.MEMPercent = mnum
-trow.TIME = row[top_command_output_row_idx_time]
+trow.TIME = row[command_output_row_idx_time]
return trow, nil
}
+func parseStatus(s string) string {
+ns := strings.TrimSpace(s)
+if len(s) > 1 {
+ns = ns[:1]
+}
+switch ns {
+case "D":
+return "D (uninterruptible sleep)"
+case "R":
+return "R (running)"
+case "S":
+return "S (sleeping)"
+case "T":
+return "T (stopped by job control signal)"
+case "t":
+return "t (stopped by debugger during trace)"
+case "Z":
+return "Z (zombie)"
+default:
+return fmt.Sprintf("unknown process %q", s)
+}
+}

33
vendor/github.com/gyuho/linux-inspect/top/schema.go generated vendored Normal file

@@ -0,0 +1,33 @@
package top
import (
"reflect"
"github.com/gyuho/linux-inspect/schema"
)
// RowSchema represents a row in 'top' command output.
// Reference http://man7.org/linux/man-pages/man1/top.1.html.
var RowSchema = schema.RawData{
IsYAML: false,
Columns: []schema.Column{
{Name: "PID", Godoc: "pid of the process", Kind: reflect.Int64},
{Name: "USER", Godoc: "user name", Kind: reflect.String},
{Name: "PR", Godoc: "priority", Kind: reflect.String},
{Name: "NI", Godoc: "nice value of the task", Kind: reflect.String},
{Name: "VIRT", Godoc: "total amount of virtual memory used by the task (in KiB)", Kind: reflect.String},
{Name: "RES", Godoc: "non-swapped physical memory a task is using (in KiB)", Kind: reflect.String},
{Name: "SHR", Godoc: "amount of shared memory available to a task, not all of which is typically resident (in KiB)", Kind: reflect.String},
{Name: "S", Godoc: "process status", Kind: reflect.String},
{Name: "CPUPercent", Godoc: "%CPU", Kind: reflect.Float64},
{Name: "MEMPercent", Godoc: "%MEM", Kind: reflect.Float64},
{Name: "TIME", Godoc: "CPU time (TIME+)", Kind: reflect.String},
{Name: "COMMAND", Godoc: "command", Kind: reflect.String},
},
ColumnsToParse: map[string]schema.RawDataType{
"S": schema.TypeStatus,
"VIRT": schema.TypeBytes,
"RES": schema.TypeBytes,
"SHR": schema.TypeBytes,
},
}


@@ -1,4 +1,4 @@
-package psn
+package top
import (
"bufio"
@@ -11,8 +11,8 @@ import (
"github.com/kr/pty"
)
-// TopStream provides top command output stream.
+// Stream provides top command output stream.
-type TopStream struct {
+type Stream struct {
cmd *exec.Cmd
pmu sync.Mutex
@@ -22,8 +22,8 @@ type TopStream struct {
wg sync.WaitGroup
rcond *sync.Cond
rmu sync.RWMutex // protect results
-queue []TopCommandRow
+queue []Row
-pid2TopCommandRow map[int64]TopCommandRow
+pid2Row map[int64]Row
err error
errc chan error
@@ -34,7 +34,7 @@ type TopStream struct {
}
// StartStream starts 'top' command stream.
-func (cfg *TopConfig) StartStream() (*TopStream, error) {
+func (cfg *Config) StartStream() (*Stream, error) {
if err := cfg.createCmd(); err != nil {
return nil, err
}
@@ -43,7 +43,7 @@ func (cfg *TopConfig) StartStream() (*TopStream, error) {
return nil, err
}
-str := &TopStream{
+str := &Stream{
cmd: cfg.cmd,
pmu: sync.Mutex{},
@@ -53,8 +53,8 @@ func (cfg *TopConfig) StartStream() (*TopStream, error) {
rmu: sync.RWMutex{},
// pre-allocate
-queue: make([]TopCommandRow, 0, 500),
+queue: make([]Row, 0, 500),
-pid2TopCommandRow: make(map[int64]TopCommandRow, 500),
+pid2Row: make(map[int64]Row, 500),
err: nil,
errc: make(chan error, 1),
@@ -72,32 +72,32 @@ func (cfg *TopConfig) StartStream() (*TopStream, error) {
}
// Stop kills the 'top' process and waits for it to exit.
-func (str *TopStream) Stop() error {
+func (str *Stream) Stop() error {
return str.close(true)
}
// Wait just waits for the 'top' process to exit.
-func (str *TopStream) Wait() error {
+func (str *Stream) Wait() error {
return str.close(false)
}
// ErrChan returns the error from stream.
-func (str *TopStream) ErrChan() <-chan error {
+func (str *Stream) ErrChan() <-chan error {
return str.errc
}
// Latest returns the latest top command outputs.
-func (str *TopStream) Latest() map[int64]TopCommandRow {
+func (str *Stream) Latest() map[int64]Row {
str.rmu.RLock()
-cm := make(map[int64]TopCommandRow, len(str.pid2TopCommandRow))
+cm := make(map[int64]Row, len(str.pid2Row))
-for k, v := range str.pid2TopCommandRow {
+for k, v := range str.pid2Row {
cm[k] = v
}
str.rmu.RUnlock()
return cm
}
-func (str *TopStream) noError() (noErr bool) {
+func (str *Stream) noError() (noErr bool) {
str.rmu.RLock()
noErr = str.err == nil
str.rmu.RUnlock()
@@ -105,7 +105,7 @@ func (str *TopStream) noError() (noErr bool) {
}
// feed new top results into the queue
-func (str *TopStream) enqueue() {
+func (str *Stream) enqueue() {
defer str.wg.Done()
reader := bufio.NewReader(str.pt)
for str.noError() {
@@ -130,12 +130,12 @@ func (str *TopStream) enqueue() {
}
row := strings.Fields(line)
-if len(row) != len(TopRowHeaders) {
+if len(row) != len(Headers) {
str.rmu.Unlock()
continue
}
-r, rerr := parseTopRow(row)
+r, rerr := parseRow(row)
if rerr != nil {
str.err = rerr
str.rmu.Unlock()
@@ -156,7 +156,7 @@ func (str *TopStream) enqueue() {
// dequeue polls from 'top' process.
// And signals error channel if any.
-func (str *TopStream) dequeue() {
+func (str *Stream) dequeue() {
str.rmu.Lock()
for {
// wait until there's output
@@ -172,7 +172,7 @@ func (str *TopStream) dequeue() {
row := str.queue[0]
str.queue = str.queue[1:]
-str.pid2TopCommandRow[row.PID] = row
+str.pid2Row[row.PID] = row
toc := false
str.readymu.Lock()
@@ -194,7 +194,7 @@ func (str *TopStream) dequeue() {
str.rmu.Unlock()
}
-func (str *TopStream) close(kill bool) (err error) {
+func (str *Stream) close(kill bool) (err error) {
if str.cmd == nil {
return str.err
}
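A hedged usage sketch of the renamed streaming API: only identifiers visible in this diff (Config, Exec, IntervalSecond, DefaultExecPath, StartStream, Latest, Stop) are used, and Config fields not shown here are left at their zero values, so treat this as an outline rather than the package's documented example.

package main

import (
	"fmt"
	"time"

	"github.com/gyuho/linux-inspect/top"
)

func main() {
	cfg := &top.Config{Exec: top.DefaultExecPath, IntervalSecond: 2}
	stream, err := cfg.StartStream()
	if err != nil {
		panic(err)
	}
	defer stream.Stop()

	time.Sleep(5 * time.Second) // let a few 'top' iterations arrive

	for pid, row := range stream.Latest() {
		fmt.Printf("%d\t%s\t%.1f%%\n", pid, row.COMMAND, row.CPUPercent)
	}
}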


@@ -1,17 +1,21 @@
-package psn
+package top
import (
"bytes"
"fmt"
"io"
"os/exec"
+"github.com/gyuho/linux-inspect/pkg/fileutil"
)
-// DefaultTopPath is the default 'top' command path.
+// DefaultExecPath is the default 'top' command path.
-var DefaultTopPath = "/usr/bin/top"
+var DefaultExecPath = "/usr/bin/top"
-// TopConfig configures 'top' command runs.
+// Config configures 'top' command runs.
-type TopConfig struct {
+type Config struct {
+// Exec is the 'top' command path.
+// Defaults to '/usr/bin/top'.
Exec string
// MAKE THIS TRUE BY DEFAULT
@@ -45,8 +49,14 @@ type TopConfig struct {
}
// Flags returns the 'top' command flags.
-func (cfg *TopConfig) Flags() (fs []string) {
+func (cfg *Config) Flags() (fs []string) {
-// batch mode by default
+// start 'top' in batch mode, which could be useful
+// for sending output from 'top' to other programs or to a file.
+// In this mode, 'top' will not accept input and runs until the interations
+// limit ('-n' flag) or until killed.
+//
+// MAKE THIS TRUE BY DEFAULT
+// OTHERWISE PARSER HAS TO DEAL WITH HIGHLIGHTED TEXTS
fs = append(fs, "-b")
if cfg.Limit > 0 { // if 1, command just exists after one output
@@ -64,12 +74,12 @@ func (cfg *TopConfig) Flags() (fs []string) {
return
}
-// process updates with '*exec.Cmd' for the given 'TopConfig'.
+// process updates with '*exec.Cmd' for the given 'Config'.
-func (cfg *TopConfig) createCmd() error {
+func (cfg *Config) createCmd() error {
if cfg == nil {
-return fmt.Errorf("TopConfig is nil")
+return fmt.Errorf("Config is nil")
}
-if !exist(cfg.Exec) {
+if !fileutil.Exist(cfg.Exec) {
return fmt.Errorf("%q does not exist", cfg.Exec)
}
flags := cfg.Flags()
@@ -82,12 +92,12 @@ func (cfg *TopConfig) createCmd() error {
return nil
}
-// GetTop returns all entries in 'top' command.
+// Get returns all entries in 'top' command.
// If pid<1, it reads all processes in 'top' command.
// This is one-time command.
-func GetTop(topPath string, pid int64) ([]TopCommandRow, error) {
+func Get(topPath string, pid int64) ([]Row, error) {
buf := new(bytes.Buffer)
-cfg := &TopConfig{
+cfg := &Config{
Exec: topPath,
Limit: 1,
IntervalSecond: 1,
@@ -95,6 +105,9 @@ func GetTop(topPath string, pid int64) ([]TopCommandRow, error) {
Writer: buf,
cmd: nil,
}
+if cfg.Exec == "" {
+cfg.Exec = topPath
+}
if err := cfg.createCmd(); err != nil {
return nil, err
}
@@ -103,5 +116,5 @@ func GetTop(topPath string, pid int64) ([]TopCommandRow, error) {
if err := cfg.cmd.Run(); err != nil {
return nil, err
}
-return ParseTopOutput(buf.String())
+return Parse(buf.String())
}
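And the one-shot form after the rename, again only a sketch built from identifiers introduced in this commit:

package main

import (
	"fmt"

	"github.com/gyuho/linux-inspect/top"
)

func main() {
	// pid < 1 reads every process; 'top' runs exactly one batch iteration.
	rows, err := top.Get(top.DefaultExecPath, -1)
	if err != nil {
		panic(err)
	}
	for _, r := range rows {
		fmt.Printf("%d\t%s\t%s\tcpu=%.1f%%\tmem=%.1f%%\n",
			r.PID, r.COMMAND, r.SParsedStatus, r.CPUPercent, r.MEMPercent)
	}
}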