Merge pull request #94 from coreos/update

vendor: sync with etcd master
Gyu-Ho Lee 2016-12-13 11:18:40 -08:00 committed by GitHub
commit 84ece6b1c5
28 changed files with 1793 additions and 838 deletions


@ -3,7 +3,7 @@ language: go
sudo: false
go:
- 1.7.3
- 1.7.4
- tip
script:


@ -71,6 +71,8 @@ const (
Request_etcdv2 Request_Database = 1
Request_ZooKeeper Request_Database = 2
Request_Consul Request_Database = 3
Request_zetcd Request_Database = 4
Request_cetcd Request_Database = 5
)
var Request_Database_name = map[int32]string{
@ -78,12 +80,16 @@ var Request_Database_name = map[int32]string{
1: "etcdv2",
2: "ZooKeeper",
3: "Consul",
4: "zetcd",
5: "cetcd",
}
var Request_Database_value = map[string]int32{
"etcdv3": 0,
"etcdv2": 1,
"ZooKeeper": 2,
"Consul": 3,
"zetcd": 4,
"cetcd": 5,
}
func (x Request_Database) String() string {
@ -937,38 +943,38 @@ var (
func init() { proto.RegisterFile("agent/message.proto", fileDescriptorMessage) }
var fileDescriptorMessage = []byte{
// 518 bytes of a gzipped FileDescriptorProto
// 528 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x4e, 0xdb, 0x4c,
0x14, 0xc5, 0x63, 0x42, 0xc0, 0xbe, 0x01, 0x3e, 0x6b, 0xbe, 0xb6, 0x8c, 0x50, 0x65, 0x45, 0x16,
0x8b, 0x6c, 0x30, 0x12, 0xa9, 0x50, 0x17, 0x74, 0x93, 0x44, 0x95, 0x22, 0xfa, 0x07, 0xd9, 0xed,
0xa6, 0xbb, 0x89, 0x73, 0x71, 0x53, 0x92, 0xb9, 0xee, 0xcc, 0x18, 0x25, 0x7d, 0x92, 0x3e, 0x12,
0xcb, 0xbe, 0x41, 0xdb, 0xf4, 0x45, 0xaa, 0x4c, 0x88, 0x21, 0x05, 0xda, 0xdd, 0x9c, 0xf3, 0x3b,
0xe7, 0x5e, 0xc9, 0xba, 0x86, 0xff, 0x45, 0x86, 0xd2, 0x1c, 0x8e, 0x51, 0x6b, 0x91, 0x61, 0x94,
0x2b, 0x32, 0xc4, 0x6a, 0xd6, 0xdc, 0x3b, 0xc8, 0x86, 0xe6, 0x63, 0xd1, 0x8f, 0x52, 0x1a, 0x1f,
0x66, 0x94, 0xd1, 0xa1, 0xa5, 0xfd, 0xe2, 0xdc, 0x2a, 0x2b, 0xec, 0x6b, 0xd1, 0x0a, 0xbf, 0xd7,
0x60, 0x33, 0xc6, 0xcf, 0x05, 0x6a, 0xc3, 0x8e, 0xc1, 0xa3, 0x1c, 0x95, 0x30, 0x43, 0x92, 0xdc,
0x69, 0x38, 0xcd, 0x9d, 0x23, 0x1e, 0xd9, 0xa9, 0xd1, 0x75, 0x24, 0x7a, 0xbb, 0xe4, 0xf1, 0x4d,
0x94, 0xb5, 0xc0, 0x1d, 0x08, 0x23, 0xfa, 0x42, 0x23, 0x5f, 0xb3, 0xb5, 0xdd, 0x3f, 0x6a, 0xdd,
0x6b, 0x1c, 0x97, 0x41, 0x16, 0xc2, 0x56, 0x8e, 0xa8, 0x7a, 0x67, 0x89, 0x51, 0x43, 0x99, 0xf1,
0x6a, 0xc3, 0x69, 0x7a, 0xf1, 0x8a, 0xc7, 0x1a, 0x50, 0xd7, 0xa8, 0x2e, 0x51, 0xf5, 0xe4, 0x00,
0x27, 0x7c, 0xbd, 0xe1, 0x34, 0xb7, 0xe3, 0xdb, 0x16, 0xdb, 0x03, 0xd7, 0xa0, 0x36, 0x6f, 0xc4,
0x18, 0x79, 0xcd, 0x4e, 0x28, 0x35, 0x3b, 0x86, 0x27, 0x19, 0x51, 0x36, 0xc2, 0xce, 0x88, 0x8a,
0xc1, 0x99, 0xa2, 0x4f, 0x98, 0x2e, 0x92, 0x1b, 0x36, 0xf9, 0x00, 0x65, 0xcf, 0xe0, 0xf1, 0x2d,
0x92, 0x18, 0x52, 0x22, 0xc3, 0x53, 0x9c, 0xf2, 0x4d, 0x5b, 0xbb, 0x1f, 0xb2, 0x36, 0x3c, 0xbd,
0x0b, 0xda, 0x45, 0x7a, 0x81, 0x8b, 0x9d, 0xae, 0x2d, 0xff, 0x35, 0xc3, 0x5e, 0x42, 0x70, 0x97,
0x27, 0x45, 0xbf, 0x3b, 0x54, 0x98, 0x1a, 0x52, 0x53, 0xee, 0xd9, 0x29, 0xff, 0x48, 0xb1, 0x7d,
0xd8, 0xfe, 0x42, 0x74, 0x81, 0x98, 0xa3, 0x7a, 0x3d, 0xed, 0x75, 0x39, 0xd8, 0x2f, 0xb7, 0x6a,
0xb2, 0xe7, 0xb0, 0x7b, 0x63, 0x88, 0x49, 0x67, 0x34, 0x44, 0x69, 0x3a, 0x72, 0x22, 0x35, 0xaf,
0x37, 0x9c, 0x66, 0x35, 0x7e, 0x08, 0xb3, 0x08, 0x58, 0x89, 0x12, 0x29, 0xf2, 0x0e, 0x15, 0xd2,
0xf0, 0x2d, 0x5b, 0xba, 0x87, 0x84, 0x27, 0xe0, 0x95, 0x87, 0xc3, 0x3c, 0xa8, 0x25, 0x46, 0x28,
0xe3, 0x57, 0x98, 0x0b, 0xeb, 0x89, 0xa1, 0xdc, 0x77, 0x58, 0x7d, 0x7e, 0x85, 0xda, 0xda, 0x6b,
0x6c, 0x1b, 0xbc, 0xf7, 0xf9, 0x88, 0xc4, 0xe0, 0x15, 0x65, 0x7e, 0x35, 0x7c, 0x01, 0xee, 0xf2,
0x7e, 0x18, 0xc0, 0x06, 0x9a, 0x74, 0x70, 0xd9, 0xf2, 0x2b, 0xe5, 0xfb, 0xc8, 0x77, 0xe6, 0x95,
0x0f, 0x44, 0xa7, 0x76, 0xaf, 0xbf, 0x36, 0x47, 0x1d, 0x92, 0xba, 0x18, 0xf9, 0xd5, 0x70, 0x1f,
0xdc, 0x18, 0x75, 0x4e, 0x52, 0x23, 0xe3, 0xb0, 0xa9, 0x8b, 0x34, 0x45, 0xad, 0xed, 0x7d, 0xbb,
0xf1, 0x52, 0x1e, 0x9d, 0x40, 0xfd, 0x9d, 0x12, 0x52, 0xe7, 0xa4, 0x0c, 0x2a, 0x76, 0x00, 0xae,
0x95, 0xe7, 0xa8, 0xd8, 0xce, 0xea, 0x31, 0xef, 0xfd, 0x57, 0xea, 0xc5, 0xd4, 0xb0, 0xd2, 0x7e,
0x74, 0xf5, 0x33, 0xa8, 0x5c, 0xcd, 0x02, 0xe7, 0xdb, 0x2c, 0x70, 0x7e, 0xcc, 0x02, 0xe7, 0xeb,
0xaf, 0xa0, 0xd2, 0xdf, 0xb0, 0xbf, 0x58, 0xeb, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x19,
0x2e, 0xef, 0xaf, 0x03, 0x00, 0x00,
0x8b, 0x6c, 0x30, 0x12, 0x54, 0xa8, 0x0b, 0x56, 0x24, 0xaa, 0x14, 0xd1, 0x3f, 0xc8, 0x6e, 0x37,
0xdd, 0x4d, 0x9c, 0x8b, 0x9b, 0x92, 0xcc, 0x75, 0x67, 0xc6, 0x28, 0xe1, 0x49, 0xfa, 0x48, 0x2c,
0xfb, 0x08, 0x6d, 0xfa, 0x0c, 0xdd, 0x57, 0x9e, 0x10, 0x43, 0x0a, 0xb4, 0xbb, 0x7b, 0xce, 0xef,
0x9c, 0x3b, 0x92, 0x75, 0x0d, 0xff, 0x8b, 0x0c, 0xa5, 0xd9, 0x1f, 0xa3, 0xd6, 0x22, 0xc3, 0x28,
0x57, 0x64, 0x88, 0x35, 0xac, 0xb9, 0xb3, 0x97, 0x0d, 0xcd, 0xa7, 0xa2, 0x1f, 0xa5, 0x34, 0xde,
0xcf, 0x28, 0xa3, 0x7d, 0x4b, 0xfb, 0xc5, 0xb9, 0x55, 0x56, 0xd8, 0x69, 0xde, 0x0a, 0x7f, 0x35,
0x60, 0x3d, 0xc6, 0x2f, 0x05, 0x6a, 0xc3, 0x8e, 0xc0, 0xa3, 0x1c, 0x95, 0x30, 0x43, 0x92, 0xdc,
0x69, 0x39, 0xed, 0xad, 0x03, 0x1e, 0xd9, 0xad, 0xd1, 0x4d, 0x24, 0x7a, 0xb7, 0xe0, 0xf1, 0x6d,
0x94, 0x1d, 0x82, 0x3b, 0x10, 0x46, 0xf4, 0x85, 0x46, 0xbe, 0x62, 0x6b, 0xdb, 0x7f, 0xd4, 0xba,
0x37, 0x38, 0xae, 0x82, 0x2c, 0x84, 0x8d, 0x1c, 0x51, 0xf5, 0xce, 0x12, 0xa3, 0x86, 0x32, 0xe3,
0xf5, 0x96, 0xd3, 0xf6, 0xe2, 0x25, 0x8f, 0xb5, 0xa0, 0xa9, 0x51, 0x5d, 0xa2, 0xea, 0xc9, 0x01,
0x4e, 0xf8, 0x6a, 0xcb, 0x69, 0x6f, 0xc6, 0x77, 0x2d, 0xb6, 0x03, 0xae, 0x41, 0x6d, 0xde, 0x8a,
0x31, 0xf2, 0x86, 0xdd, 0x50, 0x69, 0x76, 0x04, 0xcf, 0x32, 0xa2, 0x6c, 0x84, 0x9d, 0x11, 0x15,
0x83, 0x33, 0x45, 0x9f, 0x31, 0x9d, 0x27, 0xd7, 0x6c, 0xf2, 0x11, 0xca, 0x5e, 0xc0, 0xd3, 0x3b,
0x24, 0x31, 0xa4, 0x44, 0x86, 0xa7, 0x38, 0xe5, 0xeb, 0xb6, 0xf6, 0x30, 0x64, 0x27, 0xf0, 0xfc,
0x3e, 0x38, 0x29, 0xd2, 0x0b, 0x9c, 0xbf, 0xe9, 0xda, 0xf2, 0x5f, 0x33, 0xec, 0x15, 0x04, 0xf7,
0x79, 0x52, 0xf4, 0xbb, 0x43, 0x85, 0xa9, 0x21, 0x35, 0xe5, 0x9e, 0xdd, 0xf2, 0x8f, 0x14, 0xdb,
0x85, 0xcd, 0x2b, 0xa2, 0x0b, 0xc4, 0x1c, 0xd5, 0x9b, 0x69, 0xaf, 0xcb, 0xc1, 0x7e, 0xb9, 0x65,
0x93, 0xbd, 0x84, 0xed, 0x5b, 0x43, 0x4c, 0x3a, 0xa3, 0x21, 0x4a, 0xd3, 0x91, 0x13, 0xa9, 0x79,
0xb3, 0xe5, 0xb4, 0xeb, 0xf1, 0x63, 0x98, 0x45, 0xc0, 0x2a, 0x94, 0x48, 0x91, 0x77, 0xa8, 0x90,
0x86, 0x6f, 0xd8, 0xd2, 0x03, 0x24, 0x3c, 0x06, 0xaf, 0x3a, 0x1c, 0xe6, 0x41, 0x23, 0x31, 0x42,
0x19, 0xbf, 0xc6, 0x5c, 0x58, 0x4d, 0x0c, 0xe5, 0xbe, 0xc3, 0x9a, 0xe5, 0x15, 0x6a, 0x6b, 0xaf,
0xb0, 0x4d, 0xf0, 0x3e, 0xe4, 0x23, 0x12, 0x83, 0xd7, 0x94, 0xf9, 0xf5, 0x30, 0x01, 0x77, 0x71,
0x3f, 0x0c, 0x60, 0x0d, 0x4d, 0x3a, 0xb8, 0x3c, 0xf4, 0x6b, 0xd5, 0x7c, 0xe0, 0x3b, 0x65, 0xe5,
0x23, 0xd1, 0xa9, 0x7d, 0xd7, 0x5f, 0x29, 0x51, 0x87, 0xa4, 0x2e, 0x46, 0x7e, 0xbd, 0x7c, 0xef,
0xaa, 0xcc, 0xf9, 0xab, 0xe5, 0x98, 0xda, 0xb1, 0x11, 0xee, 0x82, 0x1b, 0xa3, 0xce, 0x49, 0x6a,
0x64, 0x1c, 0xd6, 0x75, 0x91, 0xa6, 0xa8, 0xb5, 0xbd, 0x7a, 0x37, 0x5e, 0xc8, 0x83, 0x63, 0x68,
0xbe, 0x57, 0x42, 0xea, 0x9c, 0x94, 0x41, 0xc5, 0xf6, 0xc0, 0xb5, 0xf2, 0x1c, 0x15, 0xdb, 0x5a,
0x3e, 0xf1, 0x9d, 0xff, 0x2a, 0x3d, 0xdf, 0x1a, 0xd6, 0x4e, 0x9e, 0x5c, 0xff, 0x08, 0x6a, 0xd7,
0xb3, 0xc0, 0xf9, 0x36, 0x0b, 0x9c, 0xef, 0xb3, 0xc0, 0xf9, 0xfa, 0x33, 0xa8, 0xf5, 0xd7, 0xec,
0x8f, 0x77, 0xf8, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x0a, 0xb2, 0x57, 0xe6, 0xc5, 0x03, 0x00, 0x00,
}


@ -24,6 +24,8 @@ message Request {
etcdv2 = 1;
ZooKeeper = 2;
Consul = 3;
zetcd = 4;
cetcd = 5;
}
Operation operation = 1;
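For illustration, a hedged sketch of using the newly added enum values from Go. The import path and alias are assumptions; only the identifiers and the generated name map come from the code above.

// import (
//     "fmt"
//     agentpb "github.com/coreos/dbtester/agent" // assumed import path for the generated package
// )
db := agentpb.Request_zetcd
fmt.Println(db) // "zetcd", via the generated String()
fmt.Println(agentpb.Request_Database_name[int32(agentpb.Request_cetcd)]) // "cetcd"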

glide.lock (generated)

@ -1,5 +1,5 @@
hash: ba57a9a4b64241befe15f1120a0ab284e5df4d803c9fea7e40fa40d54431cd54
updated: 2016-12-06T10:29:34.171946043-08:00
hash: 75a5bcd048035387fc2786ae1c6d59635ee1c301f0d24771185064f10720f09a
updated: 2016-12-13T10:52:33.09408304-08:00
imports:
- name: bitbucket.org/zombiezen/gopdf
version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
@ -25,7 +25,7 @@ imports:
- name: github.com/cheggaaa/pb
version: 6e9d17711bb763b26b68b3931d47f24c1323abab
- name: github.com/coreos/etcd
version: cfd10b4bbfc6a9a1cf2d3f5ec8e31dae31e15122
version: 1a8e78cd55fe145987c851a2adb98adb188e2964
subpackages:
- auth/authpb
- client
@ -55,7 +55,7 @@ imports:
- proto
- protoc-gen-gogo/descriptor
- name: github.com/golang/freetype
version: 38b4c392adc5eed94207994c4848fff99f4ac234
version: d9be45aaf7452cc30c0ceb1b1bf7efe1d17b7c87
subpackages:
- raster
- truetype
@ -100,13 +100,13 @@ imports:
subpackages:
- process
- name: github.com/hashicorp/consul
version: 4e97f36614bf2877b7065f8e4ce4fc16cd7dd9ca
version: 763fc2f90299e558a1fb179951e349f19d9d9bd1
subpackages:
- api
- name: github.com/hashicorp/go-cleanhttp
version: ad28ea4487f05916463e2423a55166280e8254b5
- name: github.com/hashicorp/serf
version: f679d7594a349263f6118db40d87122d3a474e7d
version: d3a67ab21bc8a4643fa53a3633f2d951dd50c6ca
subpackages:
- coordinate
- name: github.com/inconshreveable/mousetrap
@ -125,7 +125,7 @@ imports:
- name: github.com/olekukonko/tablewriter
version: cca8bbc0798408af109aaaa239cbd2634846b340
- name: github.com/prometheus/client_golang
version: e51041b3fa41cece0dca035740ba6411905be473
version: c5b7fccd204277076155f10851dad72b76a49317
subpackages:
- prometheus
- name: github.com/prometheus/client_model
@ -139,13 +139,13 @@ imports:
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
version: fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60
- name: github.com/samuel/go-zookeeper
version: 1d7be4effb13d2d908342d349d71a284a7542693
subpackages:
- zk
- name: github.com/spf13/cobra
version: 6b74a60562f5c1c920299b8f02d153e16f4897fc
version: 9495bc009a56819bdb0ddbc1a373e29c140bc674
- name: github.com/spf13/pflag
version: 5ccb023bc27df288a957c5e994cd44fd19619465
- name: github.com/ugorji/go
@ -153,7 +153,7 @@ imports:
subpackages:
- codec
- name: golang.org/x/image
version: e2d0a9f0e684ecd17e2f185e3d055cb94811f22e
version: 88b013251f75f7c43fdc42017751c3493cc3b8f7
subpackages:
- draw
- font


@ -9,7 +9,7 @@ import:
- package: github.com/cheggaaa/pb
version: 6e9d17711bb763b26b68b3931d47f24c1323abab
- package: github.com/coreos/etcd
version: cfd10b4bbfc6a9a1cf2d3f5ec8e31dae31e15122
version: 1a8e78cd55fe145987c851a2adb98adb188e2964
subpackages:
- auth/authpb
- client
@ -57,7 +57,7 @@ import:
subpackages:
- process
- package: github.com/hashicorp/consul
version: 4e97f36614bf2877b7065f8e4ce4fc16cd7dd9ca
version: 763fc2f90299e558a1fb179951e349f19d9d9bd1
subpackages:
- api
- package: github.com/samuel/go-zookeeper
@ -65,7 +65,7 @@ import:
subpackages:
- zk
- package: github.com/spf13/cobra
version: 6b74a60562f5c1c920299b8f02d153e16f4897fc
version: 9495bc009a56819bdb0ddbc1a373e29c140bc674
- package: golang.org/x/net
version: 6acef71eb69611914f7a30939ea9f6e194c78172
subpackages:


@ -209,6 +209,7 @@ func (g *GlyphBuf) load(recursion uint32, i Index, useMyMetrics bool) (err error
g.addPhantomsAndScale(len(g.Points), len(g.Points), true, true)
copy(g.phantomPoints[:], g.Points[len(g.Points)-4:])
g.Points = g.Points[:len(g.Points)-4]
// TODO: also trim g.InFontUnits and g.Unhinted?
return nil
}
@ -282,6 +283,10 @@ func (g *GlyphBuf) loadSimple(glyf []byte, ne int) (program []byte) {
program = glyf[offset : offset+instrLen]
offset += instrLen
if ne == 0 {
return program
}
np0 := len(g.Points)
np1 := np0 + int(g.Ends[len(g.Ends)-1])


@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).
The following components are included in this product:
goautoneg
http://bitbucket.org/ww/goautoneg
Copyright 2011, Open Knowledge Foundation Ltd.
See README.txt for license details.
perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein


@ -15,15 +15,15 @@ package prometheus
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
// collection. See Registerer.Register.
//
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
// also Collectors (which only ever collect one metric, namely itself). An
// implementer of Collector may, however, collect multiple metrics in a
// coordinated fashion and/or create metrics on the fly. Examples for collectors
// already implemented in this library are the metric vectors (i.e. collection
// of multiple instances of the same Metric but with different label values)
// like GaugeVec or SummaryVec, and the ExpvarCollector.
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
@ -37,39 +37,39 @@ type Collector interface {
// executing this method, it must send an invalid descriptor (created
// with NewInvalidDesc) to signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by Prometheus when collecting metrics. The
// implementation sends each collected metric via the provided channel
// and returns once the last metric has been sent. The descriptor of
// each sent metric is one of those returned by Describe. Returned
// metrics that share the same descriptor must differ in their variable
// label values. This method may be called concurrently and must
// therefore be implemented in a concurrency safe way. Blocking occurs
// at the expense of total performance of rendering all registered
// metrics. Ideally, Collector implementations support concurrent
// readers.
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
// descriptor of each sent metric is one of those returned by
// Describe. Returned metrics that share the same descriptor must differ
// in their variable label values. This method may be called
// concurrently and must therefore be implemented in a concurrency safe
// way. Blocking occurs at the expense of total performance of rendering
// all registered metrics. Ideally, Collector implementations support
// concurrent readers.
Collect(chan<- Metric)
}
// SelfCollector implements Collector for a single Metric so that that the
// Metric collects itself. Add it as an anonymous field to a struct that
// implements Metric, and call Init with the Metric itself as an argument.
type SelfCollector struct {
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
self Metric
}
// Init provides the SelfCollector with a reference to the metric it is supposed
// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *SelfCollector) Init(self Metric) {
func (c *selfCollector) init(self Metric) {
c.self = self
}
// Describe implements Collector.
func (c *SelfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc()
}
// Collect implements Collector.
func (c *SelfCollector) Collect(ch chan<- Metric) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}


@ -15,7 +15,6 @@ package prometheus
import (
"errors"
"hash/fnv"
)
// Counter is a Metric that represents a single numerical value that only ever
@ -36,6 +35,9 @@ type Counter interface {
// Prometheus metric. Do not use it for regular handling of a
// Prometheus counter (as it can be used to break the contract of
// monotonically increasing values).
//
// Deprecated: Use NewConstMetric to create a counter for an external
// value. A Counter should never be set.
Set(float64)
// Inc increments the counter by 1.
Inc()
@ -56,7 +58,7 @@ func NewCounter(opts CounterOpts) Counter {
opts.ConstLabels,
)
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
result.Init(result) // Init self-collection.
result.init(result) // Init self-collection.
return result
}
@ -80,7 +82,7 @@ func (c *counter) Add(v float64) {
// CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation.
type CounterVec struct {
MetricVec
*MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@ -94,20 +96,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
labelPairs: makeLabelPairs(desc, lvs),
}}
result.Init(result) // Init self-collection.
return result
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
labelPairs: makeLabelPairs(desc, lvs),
}}
result.init(result) // Init self-collection.
return result
}),
}
}
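The exported constructor is unchanged by this refactor; for context, a brief usage sketch (metric and label names are made up).

// import "github.com/prometheus/client_golang/prometheus"
requests := prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "myapp_requests_total", // hypothetical name
		Help: "Requests handled, partitioned by handler.",
	},
	[]string{"handler"},
)
prometheus.MustRegister(requests)
requests.WithLabelValues("index").Inc()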


@ -1,10 +1,21 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"bytes"
"errors"
"fmt"
"hash/fnv"
"regexp"
"sort"
"strings"
@ -131,31 +142,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("duplicate label names")
return d
}
h := fnv.New64a()
var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
vh := hashNew()
for _, val := range labelValues {
b.Reset()
b.WriteString(val)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
vh = hashAdd(vh, val)
vh = hashAddByte(vh, separatorByte)
}
d.id = h.Sum64()
d.id = vh
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
h.Reset()
b.Reset()
b.WriteString(help)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
lh := hashNew()
lh = hashAdd(lh, help)
lh = hashAddByte(lh, separatorByte)
for _, labelName := range labelNames {
b.Reset()
b.WriteString(labelName)
b.WriteByte(separatorByte)
h.Write(b.Bytes())
lh = hashAdd(lh, labelName)
lh = hashAddByte(lh, separatorByte)
}
d.dimHash = h.Sum64()
d.dimHash = lh
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {


@ -11,18 +11,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus provides embeddable metric primitives for servers and
// standardized exposition of telemetry through a web services interface.
// Package prometheus provides metrics primitives to instrument code for
// monitoring. It also offers a registry for metrics. Sub-packages allow
// exposing the registered metrics via HTTP (package promhttp) or pushing them
// to a Pushgateway (package push).
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//specified otherwise.
//
// To expose metrics registered with the Prometheus registry, an HTTP server
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
// A Basic Example
//
// http.Handle("/metrics", prometheus.Handler())
//
// As a starting point a very basic usage example:
// As a starting point, a very basic usage example:
//
// package main
//
@ -30,6 +29,7 @@
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var (
@ -37,73 +37,145 @@
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// })
// hdFailures = prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// )
// )
//
// func init() {
// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
//
// func main() {
// cpuTemp.Set(65.3)
// hdFailures.Inc()
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
// http.Handle("/metrics", prometheus.Handler())
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":8080", nil)
// }
//
//
// This is a complete program that exports two metrics, a Gauge and a Counter.
// It also exports some stats about the HTTP usage of the /metrics
// endpoint. (See the Handler function for more detail.)
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
//
// Two more advanced metric types are the Summary and Histogram.
// Metrics
//
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
// Prometheus server not to assume anything about its type.
//
// In addition to the fundamental metric types Gauge, Counter, Summary,
// Histogram, and Untyped, a very important part of the Prometheus data model is
// the partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec.
// HistogramVec, and UntypedVec.
//
// Those are all the parts needed for basic usage. Detailed documentation and
// examples are provided below.
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
// SummaryVec, HistogramVec, and UntypedVec are not.
//
// Everything else this package offers is essentially for "power users" only. A
// few pointers to "power user features":
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
// HistogramOpts, or UntypedOpts.
//
// All the various ...Opts structs have a ConstLabels field for labels that
// never change their value (which is only useful under special circumstances,
// see documentation of the Opts type).
// Custom Collectors and constant Metrics
//
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
// not to assume anything about its type.
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
//
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
// There is a more involved use case, too: If you already have metrics
// available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. Your own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances.
//
// For custom metric collection, there are two entry points: Custom Metric
// implementations and custom Collector implementations. A Metric is the
// fundamental unit in the Prometheus data model: a sample at a point in time
// together with its meta-data (like its fully-qualified name and any number of
// pairs of label name and label value) that knows how to marshal itself into a
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
// gets registered with the Prometheus registry and manages the collection of
// one or more Metrics. Many parts of this package are building blocks for
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
// metrics under the hood, and by Collectors to describe the Metrics to be
// collected, but only to be dealt with by users if they implement their own
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
// in handy. Other useful components for Metric and Collector implementation
// include: LabelPairSorter to sort the DTO version of label pairs,
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
// collection time, MetricVec to bundle custom Metrics into a metric vector
// Collector, SelfCollector to make a custom Metric collect itself.
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// metrics) as examples that are used in this package itself.
//
// A good example for a custom Collector is the ExpVarCollector included in this
// package, which exports variables exported via the "expvar" package as
// Prometheus metrics.
// If you just need to call a function to get a single float value to collect as
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
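A hedged sketch of the "mirror existing numbers" pattern described above; the metric name and the queueDepths data source are hypothetical.

// import "github.com/prometheus/client_golang/prometheus"

// queueDepths stands in for whatever numbers already exist outside Prometheus.
func queueDepths() map[string]int { return map[string]int{"ingest": 42, "export": 7} }

type queueCollector struct {
	depth *prometheus.Desc
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.depth }

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	for queue, n := range queueDepths() {
		ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, float64(n), queue)
	}
}

// Registration would look like:
//   prometheus.MustRegister(&queueCollector{depth: prometheus.NewDesc(
//       "myapp_queue_depth", "Items per queue.", []string{"queue"}, nil)})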
//
// Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might
// cause. As suggested by the name, MustRegister panics if an error occurs. With
// the Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data
// model. Inconsistencies are ideally detected at registration time, not at
// collect time. The former will usually be detected at start-up time of a
// program, while the latter will only happen at scrape time, possibly not even
// on the first scrape if the inconsistency only becomes relevant later. That is
// the main reason why a Collector and a Metric have to describe themselves to
// the registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegistry variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in
// the same way on a custom registry as the global functions Register and
// Unregister on the default registry.
//
// There are a number of uses for custom registries: You can use registries
// with special properties, see NewPedanticRegistry. You can avoid global state,
// as it is imposed by the DefaultRegistry. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegistry comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp
// sub-package. (The top-level functions in the prometheus package are
// deprecated.)
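A sketch of the custom-registry plus HTTP-exposition flow just described; the listen address is arbitrary.

// import (
//     "net/http"
//     "github.com/prometheus/client_golang/prometheus"
//     "github.com/prometheus/client_golang/prometheus/promhttp"
// )
reg := prometheus.NewRegistry()
reg.MustRegister(prometheus.NewGoCollector()) // opt in explicitly; the default registry does this for you
http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
http.ListenAndServe(":8080", nil)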
//
// Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
// Other Means of Exposition
//
// More ways of exposing metrics can easily be added. Sending metrics to
// Graphite would be an example that will soon be implemented.
package prometheus


@ -18,21 +18,21 @@ import (
"expvar"
)
// ExpvarCollector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the ExpvarCollector is inherently
// slow. Thus, the ExpvarCollector is probably great for experiments and
// prototyping, but you should seriously consider a more direct implementation of
// Prometheus metrics for monitoring production systems.
//
// Use NewExpvarCollector to create new instances.
type ExpvarCollector struct {
type expvarCollector struct {
exports map[string]*Desc
}
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
// to be registered with the Prometheus registry.
// NewExpvarCollector returns a newly allocated expvar Collector that still has
// to be registered with a Prometheus registry.
//
// An expvar Collector collects metrics from the expvar interface. It provides a
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototyping, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
// The exports map has the following meaning:
//
@ -59,21 +59,21 @@ type ExpvarCollector struct {
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
return &ExpvarCollector{
func NewExpvarCollector(exports map[string]*Desc) Collector {
return &expvarCollector{
exports: exports,
}
}
// Describe implements Collector.
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
func (e *expvarCollector) Describe(ch chan<- *Desc) {
for _, desc := range e.exports {
ch <- desc
}
}
// Collect implements Collector.
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
func (e *expvarCollector) Collect(ch chan<- Metric) {
for name, desc := range e.exports {
var m Metric
expVar := expvar.Get(name)
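Usage sketch for the now interface-returning constructor; the expvar variable name is hypothetical and would need to be published elsewhere (for example with expvar.NewInt).

// import "github.com/prometheus/client_golang/prometheus"
c := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
	"open_connections": prometheus.NewDesc( // hypothetical expvar name
		"expvar_open_connections",
		"Currently open connections, as published via expvar.",
		nil, nil,
	),
})
prometheus.MustRegister(c)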


@ -0,0 +1,29 @@
package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a.
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
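To show how these helpers replace hash/fnv elsewhere in this change, a small in-package sketch (separatorByte is the constant defined in metric.go).

h := hashNew()
h = hashAdd(h, "get")             // first label value
h = hashAddByte(h, separatorByte)
h = hashAdd(h, "200")             // second label value
h = hashAddByte(h, separatorByte)
_ = h // identical to feeding the same bytes through a hash/fnv New64a() instance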


@ -13,8 +13,6 @@
package prometheus
import "hash/fnv"
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@ -60,7 +58,7 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
MetricVec
*MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@ -74,14 +72,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
}),
}
}


@ -17,7 +17,7 @@ type goCollector struct {
// NewGoCollector returns a collector which exports metrics about the current
// go process.
func NewGoCollector() *goCollector {
func NewGoCollector() Collector {
return &goCollector{
goroutines: NewGauge(GaugeOpts{
Namespace: "go",
@ -211,7 +211,7 @@ func NewGoCollector() *goCollector {
"Number of seconds since 1970 of last garbage collection.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC*10 ^ 9) },
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
},
},
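Worth spelling out why this line changed: in Go, ^ is bitwise XOR and binds more loosely than *, so the old expression computed (ms.LastGC*10) XOR 9 instead of converting nanoseconds to seconds. A minimal sketch of the corrected conversion:

// import "runtime"
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
// MemStats.LastGC is nanoseconds since 1970; divide by 1e9 for seconds.
lastGCSeconds := float64(ms.LastGC) / 1e9
_ = lastGCSeconds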


@ -15,7 +15,6 @@ package prometheus
import (
"fmt"
"hash/fnv"
"math"
"sort"
"sync/atomic"
@ -52,11 +51,11 @@ type Histogram interface {
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var (
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a
// network service. Most likely, however, you will be required to define
// buckets customized to your use case.
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
errBucketLabelNotAllowed = fmt.Errorf(
@ -211,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
// Finally we know the final length of h.upperBounds and can make counts.
h.counts = make([]uint64, len(h.upperBounds))
h.Init(h) // Init self-collection.
h.init(h) // Init self-collection.
return h
}
@ -223,7 +222,7 @@ type histogram struct {
sumBits uint64
count uint64
SelfCollector
selfCollector
// Note that there is no mutex required.
desc *Desc
@ -288,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
MetricVec
*MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@ -302,14 +301,9 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
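As the relocated DefBuckets comment advises, buckets are usually customized; a usage sketch with made-up names and an assumed bucket layout.

// import "github.com/prometheus/client_golang/prometheus"
reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "myapp_request_duration_seconds", // hypothetical
	Help:    "Request latency distribution.",
	Buckets: prometheus.LinearBuckets(0.05, 0.05, 10), // 0.05s to 0.5s
})
prometheus.MustRegister(reqDur)
reqDur.Observe(0.127)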


@ -15,14 +15,114 @@ package prometheus
import (
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/common/expfmt"
)
// TODO(beorn7): Remove this whole file. It is a partial mirror of
// promhttp/http.go (to avoid circular import chains) where everything HTTP
// related should live. The functions here are just for avoiding
// breakage. Everything is deprecated.
const (
contentTypeHeader = "Content-Type"
contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
var bufPool sync.Pool
func getBuf() *bytes.Buffer {
buf := bufPool.Get()
if buf == nil {
return &bytes.Buffer{}
}
return buf.(*bytes.Buffer)
}
func giveBuf(buf *bytes.Buffer) {
buf.Reset()
bufPool.Put(buf)
}
// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
// (which is non instrumented).
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := DefaultGatherer.Gather()
if err != nil {
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
contentType := expfmt.Negotiate(req.Header)
buf := getBuf()
defer giveBuf(buf)
writer, encoding := decorateWriter(req, buf)
enc := expfmt.NewEncoder(writer, contentType)
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
}
if closer, ok := writer.(io.Closer); ok {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
header.Set(contentTypeHeader, string(contentType))
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
})
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
}
return writer, ""
}
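Usage sketch for the compatibility handlers added above; both serve the DefaultGatherer and honor Accept-Encoding: gzip via decorateWriter, but promhttp.Handler is the recommended replacement.

// import (
//     "log"
//     "net/http"
//     "github.com/prometheus/client_golang/prometheus"
// )
http.Handle("/metrics", prometheus.UninstrumentedHandler()) // deprecated but still functional
log.Fatal(http.ListenAndServe(":9090", nil))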
var instLabels = []string{"method", "code"}
type nower interface {
@ -57,12 +157,34 @@ func nowSeries(t ...time.Time) nower {
// has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
//
// - It uses microseconds as unit, which is deprecated and should be replaced by
// seconds.
//
// - The size of the request is calculated in a separate goroutine. Since this
// calculator requires access to the request header, it creates a race with
// any writes to the header performed during request handling.
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
// Upcoming versions of this package will provide ways of instrumenting HTTP
// handlers that are more flexible and have fewer issues. Please prefer direct
// instrumentation in the meantime.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
// InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler.
// otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
@ -73,13 +195,13 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
)
}
// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
// flexibility (at the cost of a more complex call syntax). As
// InstrumentHandler, this function registers four metric collectors, but it
// uses the provided SummaryOpts to create them. However, the fields "Name" and
// "Help" in the SummaryOpts are ignored. "Name" is replaced by
// "requests_total", "request_duration_microseconds", "request_size_bytes", and
// "response_size_bytes", respectively. "Help" is replaced by an appropriate
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
// issues) but provides more flexibility (at the cost of a more complex call
// syntax). As InstrumentHandler, this function registers four metric
// collectors, but it uses the provided SummaryOpts to create them. However, the
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
//
@ -98,13 +220,20 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
// more flexibility (at the cost of a more complex call syntax). See
// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
// the same issues) but provides more flexibility (at the cost of a more complex
// call syntax). See InstrumentHandlerWithOpts for details how the provided
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
// as InstrumentHandler is.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{


@ -22,10 +22,8 @@ import (
const separatorByte byte = 255
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementers of Metric in this package include Gauge, Counter,
// Untyped, and Summary. Users can implement their own Metric types, but that
// should be rarely needed. See the example for SelfCollector, which is also an
// example for a user-implemented Metric.
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Histogram, Summary, and Untyped.
type Metric interface {
// Desc returns the descriptor for the Metric. This method idempotently
// returns the same descriptor throughout the lifetime of the
@ -36,21 +34,23 @@ type Metric interface {
// Write encodes the Metric into a "Metric" Protocol Buffer data
// transmission object.
//
// Implementers of custom Metric types must observe concurrency safety
// as reads of this metric may occur at any time, and any blocking
// occurs at the expense of total performance of rendering all
// registered metrics. Ideally Metric implementations should support
// concurrent readers.
// Metric implementations must observe concurrency safety as reads of
// this metric may occur at any time, and any blocking occurs at the
// expense of total performance of rendering all registered
// metrics. Ideally, Metric implementations should support concurrent
// readers.
//
// The Prometheus client library attempts to minimize memory allocations
// and will provide a pre-existing reset dto.Metric pointer. Prometheus
// may recycle the dto.Metric proto message, so Metric implementations
// should just populate the provided dto.Metric and then should not keep
// any reference to it.
//
// While populating dto.Metric, labels must be sorted lexicographically.
// (Implementers may find LabelPairSorter useful for that.)
// While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is
// recommended to sort labels lexicographically. (Implementers may find
// LabelPairSorter useful for that.) Callers of Write should still make
// sure of sorting if they depend on it.
Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
// signature of this method should be changed to "Write() (*dto.Metric,
// error)".
}
// Opts bundles the options for creating most Metric types. Each metric


@ -28,7 +28,7 @@ type processCollector struct {
// NewProcessCollector returns a collector which exports the current state of
// process metrics including cpu, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace.
func NewProcessCollector(pid int, namespace string) *processCollector {
func NewProcessCollector(pid int, namespace string) Collector {
return NewProcessCollectorPIDFn(
func() (int, error) { return pid, nil },
namespace,
@ -43,7 +43,7 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
func NewProcessCollectorPIDFn(
pidFn func() (int, error),
namespace string,
) *processCollector {
) Collector {
c := processCollector{
pidFn: pidFn,
collectFn: func(chan<- Metric) {},


@ -1,65 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package prometheus
// Push triggers a metric collection by the default registry and pushes all
// collected metrics to the Pushgateway specified by addr. See the Pushgateway
// documentation for detailed implications of the job and instance
// parameter. instance can be left empty. You can use just host:port or ip:port
// as url, in which case 'http://' is added automatically. You can also include
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and instance will
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
// to push to the Pushgateway.)
func Push(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "PUT")
}
// PushAdd works like Push, but only previously pushed metrics with the same
// name (and the same job and instance) will be replaced. (It uses HTTP method
// 'POST' to push to the Pushgateway.)
func PushAdd(job, instance, url string) error {
return defRegistry.Push(job, instance, url, "POST")
}
// PushCollectors works like Push, but it does not collect from the default
// registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "PUT", collectors...)
}
// PushAddCollectors works like PushAdd, but it does not collect from the
// default registry. Instead, it collects from the provided collectors. It is a
// convenient way to push only a few metrics.
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
return pushCollectors(job, instance, url, "POST", collectors...)
}
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
r := newRegistry()
for _, collector := range collectors {
if _, err := r.Register(collector); err != nil {
return err
}
}
return r.Push(job, instance, url, method)
}
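The removed helpers are superseded by the push sub-package. A heavily hedged sketch, assuming push.FromGatherer and push.HostnameGroupingKey exist with these signatures in the vendored version (verify before relying on it):

// import (
//     "github.com/prometheus/client_golang/prometheus"
//     "github.com/prometheus/client_golang/prometheus/push" // assumed sub-package API
// )
err := push.FromGatherer("dbtester", push.HostnameGroupingKey(),
	"http://pushgateway:9091", prometheus.DefaultGatherer)
if err != nil {
	// handle the push failure
}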

File diff suppressed because it is too large.


@ -15,7 +15,6 @@ package prometheus
import (
"fmt"
"hash/fnv"
"math"
"sort"
"sync"
@ -54,8 +53,8 @@ type Summary interface {
Observe(float64)
}
// DefObjectives are the default Summary quantile values.
var (
// DefObjectives are the default Summary quantile values.
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
errQuantileLabelNotAllowed = fmt.Errorf(
@ -140,11 +139,11 @@ type SummaryOpts struct {
BufCap uint32
}
// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
// method of perk/quantile is actually not working as advertised - and it might
// be unfixable, as the underlying algorithm is apparently not capable of
// merging summaries in the first place. To avoid using Merge, we are currently
// adding observations to _each_ age bucket, i.e. the effort to add a sample is
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
// observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort
@ -228,12 +227,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
}
sort.Float64s(s.sortedObjectives)
s.Init(s) // Init self-collection.
s.init(s) // Init self-collection.
return s
}
type summary struct {
SelfCollector
selfCollector
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
mtx sync.Mutex // Protects every other moving part.
@ -391,7 +390,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
MetricVec
*MetricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@ -405,14 +404,9 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
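For context on DefObjectives, a usage sketch with the objectives spelled out explicitly (names are made up).

// import "github.com/prometheus/client_golang/prometheus"
latency := prometheus.NewSummary(prometheus.SummaryOpts{
	Name:       "myapp_rpc_latency_seconds", // hypothetical
	Help:       "RPC latency.",
	Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, // same values as DefObjectives
})
prometheus.MustRegister(latency)
latency.Observe(0.042)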


@ -13,8 +13,6 @@
package prometheus
import "hash/fnv"
// Untyped is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@ -58,7 +56,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
MetricVec
*MetricVec
}
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
@ -72,14 +70,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
opts.ConstLabels,
)
return &UntypedVec{
MetricVec: MetricVec{
children: map[uint64]Metric{},
desc: desc,
hash: fnv.New64a(),
newMetric: func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
},
},
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
}),
}
}


@ -48,7 +48,7 @@ type value struct {
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
SelfCollector
selfCollector
desc *Desc
valType ValueType
@ -68,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
valBits: math.Float64bits(val),
labelPairs: makeLabelPairs(desc, labelValues),
}
result.Init(result)
result.init(result)
return result
}
@ -113,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error {
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
SelfCollector
selfCollector
desc *Desc
valType ValueType
@ -134,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
function: function,
labelPairs: makeLabelPairs(desc, nil),
}
result.Init(result)
result.init(result)
return result
}


@ -14,10 +14,10 @@
package prometheus
import (
"bytes"
"fmt"
"hash"
"sync"
"github.com/prometheus/common/model"
)
// MetricVec is a Collector to bundle metrics of the same name that
@ -26,17 +26,32 @@ import (
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
// provided in this package.
type MetricVec struct {
mtx sync.RWMutex // Protects not only children, but also hash and buf.
children map[uint64]Metric
mtx sync.RWMutex // Protects the children.
children map[uint64][]metricWithLabelValues
desc *Desc
// hash is our own hash instance to avoid repeated allocations.
hash hash.Hash64
// buf is used to copy string contents into it for hashing,
// again to avoid allocations.
buf bytes.Buffer
newMetric func(labelValues ...string) Metric
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
hashAddByte func(h uint64, b byte) uint64
}
newMetric func(labelValues ...string) Metric
// newMetricVec returns an initialized MetricVec. The concrete value is
// returned for embedding into another struct.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
return &MetricVec{
children: map[uint64][]metricWithLabelValues{},
desc: desc,
newMetric: newMetric,
hashAdd: hashAdd,
hashAddByte: hashAddByte,
}
}
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
values []string
metric Metric
}
// Describe implements Collector. The length of the returned slice
@ -50,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
m.mtx.RLock()
defer m.mtx.RUnlock()
for _, metric := range m.children {
ch <- metric
for _, metrics := range m.children {
for _, metric := range metrics {
ch <- metric.metric
}
}
}
@ -80,14 +97,12 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
return m.getOrCreateMetric(h, lvs...), nil
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
@ -103,18 +118,12 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
lvs := make([]string, len(labels))
for i, label := range m.desc.variableLabels {
lvs[i] = labels[label]
}
return m.getOrCreateMetric(h, lvs...), nil
return m.getOrCreateMetricWithLabels(h, labels), nil
}
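
A hedged usage sketch of the two lookup styles, assuming the concrete vector wrappers (here CounterVec) forward GetMetricWithLabelValues and GetMetricWith as in this version of the library; the metric and label names are made up:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	httpReqs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_http_requests_total",
		Help: "HTTP requests by method and code.",
	}, []string{"method", "code"})

	// Positional label values: order must match the label names above.
	if c, err := httpReqs.GetMetricWithLabelValues("GET", "200"); err == nil {
		c.Inc()
	}

	// Labels map: order does not matter, but every label name must be present.
	if c, err := httpReqs.GetMetricWith(prometheus.Labels{"code": "200", "method": "GET"}); err == nil {
		c.Add(2)
	}
}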
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
@ -162,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
if err != nil {
return false
}
if _, has := m.children[h]; !has {
return false
}
delete(m.children, h)
return true
return m.deleteByHashWithLabelValues(h, lvs)
}
// Delete deletes the metric where the variable labels are the same as those
@ -187,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
if err != nil {
return false
}
if _, has := m.children[h]; !has {
return m.deleteByHashWithLabels(h, labels)
}
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
delete(m.children, h)
i := m.findMetricWithLabelValues(metrics, lvs)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use labels to select a metric and remove
// only that metric.
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
metrics, ok := m.children[h]
if !ok {
return false
}
i := m.findMetricWithLabels(metrics, labels)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.children[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.children, h)
}
return true
}
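
The deletion branch above uses the standard Go idiom for removing element i from a slice while preserving order; a standalone sketch:

package main

import "fmt"

// removeAt drops the element at index i by copying the tail over it.
func removeAt(s []string, i int) []string {
	return append(s[:i], s[i+1:]...)
}

func main() {
	fmt.Println(removeAt([]string{"a", "b", "c"}, 1)) // [a c]
}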
@ -208,40 +253,152 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if len(vals) != len(m.desc.variableLabels) {
return 0, errInconsistentCardinality
}
m.hash.Reset()
h := hashNew()
for _, val := range vals {
m.buf.Reset()
m.buf.WriteString(val)
m.hash.Write(m.buf.Bytes())
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return m.hash.Sum64(), nil
return h, nil
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if len(labels) != len(m.desc.variableLabels) {
return 0, errInconsistentCardinality
}
m.hash.Reset()
h := hashNew()
for _, label := range m.desc.variableLabels {
val, ok := labels[label]
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
}
m.buf.Reset()
m.buf.WriteString(val)
m.hash.Write(m.buf.Bytes())
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return m.hash.Sum64(), nil
return h, nil
}
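
The hashNew, hashAdd, and hashAddByte helpers are not part of this diff; a plausible allocation-free FNV-1a implementation along the lines the code assumes would look like this (a sketch, not necessarily the library's exact code; the 0xff byte stands in for model.SeparatorByte):

package main

import "fmt"

const (
	offset64 = 14695981039346656037 // FNV-1a 64-bit offset basis
	prime64  = 1099511628211        // FNV-1a 64-bit prime
)

func hashNew() uint64 { return offset64 }

func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h = (h ^ uint64(s[i])) * prime64
	}
	return h
}

func hashAddByte(h uint64, b byte) uint64 {
	return (h ^ uint64(b)) * prime64
}

func main() {
	h := hashNew()
	h = hashAdd(h, "GET")
	h = hashAddByte(h, 0xff) // separator between label values
	fmt.Printf("%#x\n", h)
}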
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
metric, ok := m.children[hash]
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label
// values, or creates it and returns the new one.
//
// This method acquires m.mtx itself (a read lock first, then the write lock if
// the metric has to be created); callers must not hold the lock.
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithLabelValues(hash, lvs)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithLabelValues(hash, lvs)
if !ok {
// Copy labelValues. Otherwise, they would be allocated even if we don't go
// down this code path.
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
metric = m.newMetric(copiedLabelValues...)
m.children[hash] = metric
// Copy lvs here (rather than earlier) so the allocation only happens when we actually go down this code path.
copiedLVs := make([]string, len(lvs))
copy(copiedLVs, lvs)
metric = m.newMetric(copiedLVs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
}
return metric
}
// getOrCreateMetricWithLabels retrieves the metric by hash and labels, or
// creates it and returns the new one.
//
// This method acquires m.mtx itself (a read lock first, then the write lock if
// the metric has to be created); callers must not hold the lock.
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithLabels(hash, labels)
m.mtx.RUnlock()
if ok {
return metric
}
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithLabels(hash, labels)
if !ok {
lvs := m.extractLabelValues(labels)
metric = m.newMetric(lvs...)
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
// getMetricWithLabelValues gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// getMetricWithLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
for i, metric := range metrics {
if m.matchLabelValues(metric.values, lvs) {
return i
}
}
return len(metrics)
}
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
for i, metric := range metrics {
if m.matchLabels(metric.values, labels) {
return i
}
}
return len(metrics)
}
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
if len(values) != len(lvs) {
return false
}
for i, v := range values {
if v != lvs[i] {
return false
}
}
return true
}
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
if len(labels) != len(values) {
return false
}
for i, k := range m.desc.variableLabels {
if values[i] != labels[k] {
return false
}
}
return true
}
func (m *MetricVec) extractLabelValues(labels Labels) []string {
labelValues := make([]string, len(labels))
for i, k := range m.desc.variableLabels {
labelValues[i] = labels[k]
}
return labelValues
}

vendor/github.com/prometheus/procfs/mountstats.go (new vendored file, 552 lines)
View File

@ -0,0 +1,552 @@
package procfs
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
// heavily as a reference:
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
//
// Special thanks to Chris Siebenmann for all of his posts explaining the
// various statistics available for NFS.
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
"time"
)
// Constants shared between multiple functions.
const (
deviceEntryLen = 8
fieldBytesLen = 8
fieldEventsLen = 27
statVersion10 = "1.0"
statVersion11 = "1.1"
fieldTransport10Len = 10
fieldTransport11Len = 13
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
type Mount struct {
// Name of the device.
Device string
// The mount point of the device.
Mount string
// The filesystem type used by the device.
Type string
// If available, additional statistics related to this Mount.
// Use a type assertion to determine if additional statistics are available.
Stats MountStats
}
// A MountStats is a type which contains detailed statistics for a specific
// type of Mount.
type MountStats interface {
mountStats()
}
// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
type MountStatsNFS struct {
// The version of statistics provided.
StatVersion string
// The age of the NFS mount.
Age time.Duration
// Statistics related to byte counters for various operations.
Bytes NFSBytesStats
// Statistics related to various NFS event occurrences.
Events NFSEventsStats
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
Transport NFSTransportStats
}
// mountStats implements MountStats.
func (m MountStatsNFS) mountStats() {}
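
To show the type assertion the Mount doc comment refers to, here is a self-contained sketch using local stand-ins for the types above; in this version the parser stores a *MountStatsNFS in Stats, so the pointer assertion is the one that matches:

package main

import (
	"fmt"
	"time"
)

// Local stand-ins for the procfs types, just to make the assertion concrete.
type MountStats interface{ mountStats() }

type MountStatsNFS struct{ Age time.Duration }

func (MountStatsNFS) mountStats() {}

type Mount struct {
	Device string
	Stats  MountStats // nil when no statistics were present
}

func main() {
	m := Mount{Device: "fs:/export", Stats: &MountStatsNFS{Age: 3 * time.Hour}}
	if nfs, ok := m.Stats.(*MountStatsNFS); ok {
		fmt.Println(m.Device, "mounted for", nfs.Age)
	}
}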
// A NFSBytesStats contains statistics about the number of bytes read and written
// by an NFS client to and from an NFS server.
type NFSBytesStats struct {
// Number of bytes read using the read() syscall.
Read int
// Number of bytes written using the write() syscall.
Write int
// Number of bytes read using the read() syscall in O_DIRECT mode.
DirectRead int
// Number of bytes written using the write() syscall in O_DIRECT mode.
DirectWrite int
// Number of bytes read from the NFS server, in total.
ReadTotal int
// Number of bytes written to the NFS server, in total.
WriteTotal int
// Number of pages read directly via mmap()'d files.
ReadPages int
// Number of pages written directly via mmap()'d files.
WritePages int
}
// A NFSEventsStats contains statistics about NFS event occurrences.
type NFSEventsStats struct {
// Number of times cached inode attributes are re-validated from the server.
InodeRevalidate int
// Number of times cached dentry nodes are re-validated from the server.
DnodeRevalidate int
// Number of times an inode cache is cleared.
DataInvalidate int
// Number of times cached inode attributes are invalidated.
AttributeInvalidate int
// Number of times files or directories have been open()'d.
VFSOpen int
// Number of times a directory lookup has occurred.
VFSLookup int
// Number of times permissions have been checked.
VFSAccess int
// Number of updates (and potential writes) to pages.
VFSUpdatePage int
// Number of pages read directly via mmap()'d files.
VFSReadPage int
// Number of times a group of pages have been read.
VFSReadPages int
// Number of pages written directly via mmap()'d files.
VFSWritePage int
// Number of times a group of pages have been written.
VFSWritePages int
// Number of times directory entries have been read with getdents().
VFSGetdents int
// Number of times attributes have been set on inodes.
VFSSetattr int
// Number of pending writes that have been forcefully flushed to the server.
VFSFlush int
// Number of times fsync() has been called on directories and files.
VFSFsync int
// Number of times locking has been attempted on a file.
VFSLock int
// Number of times files have been closed and released.
VFSFileRelease int
// Unknown. Possibly unused.
CongestionWait int
// Number of times files have been truncated.
Truncation int
// Number of times a file has been grown due to writes beyond its existing end.
WriteExtension int
// Number of times a file was removed while still open by another process.
SillyRename int
// Number of times the NFS server gave less data than expected while reading.
ShortRead int
// Number of times the NFS server wrote less data than expected while writing.
ShortWrite int
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
// offline storage.
JukeboxDelay int
// Number of NFS v4.1+ pNFS reads.
PNFSRead int
// Number of NFS v4.1+ pNFS writes.
PNFSWrite int
}
// A NFSOperationStats contains statistics for a single operation.
type NFSOperationStats struct {
// The name of the operation.
Operation string
// Number of requests performed for this operation.
Requests int
// Number of times an actual RPC request has been transmitted for this operation.
Transmissions int
// Number of times a request has had a major timeout.
MajorTimeouts int
// Number of bytes sent for this operation, including RPC headers and payload.
BytesSent int
// Number of bytes received for this operation, including RPC headers and payload.
BytesReceived int
// Duration all requests spent queued for transmission before they were sent.
CumulativeQueueTime time.Duration
// Duration it took to get a reply back after the request was transmitted.
CumulativeTotalResponseTime time.Duration
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestTime time.Duration
}
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses.
type NFSTransportStats struct {
// The local port used for the NFS mount.
Port int
// Number of times the client has had to establish a connection from scratch
// to the NFS server.
Bind int
// Number of times the client has made a TCP connection to the NFS server.
Connect int
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
// spent waiting for connections to the server to be established.
ConnectIdleTime int
// Duration since the NFS mount last saw any RPC traffic.
IdleTime time.Duration
// Number of RPC requests for this mount sent to the NFS server.
Sends int
// Number of RPC responses for this mount received from the NFS server.
Receives int
// Number of times the NFS server sent a response with a transaction ID
// unknown to this client.
BadTransactionIDs int
// A running counter, incremented on each request as the current difference
// between sends and receives.
CumulativeActiveRequests int
// A running counter, incremented on each request by the current backlog
// queue size.
CumulativeBacklog int
// The stats below are only available with stat version 1.1.
// Maximum number of simultaneously active RPC requests ever used.
MaximumRPCSlotsUsed int
// A running counter, incremented on each request as the current size of the
// sending queue.
CumulativeSendingQueue int
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue int
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
// of Mount structures containing detailed information about each mount.
// If available, statistics for each mount are parsed as well.
func parseMountStats(r io.Reader) ([]*Mount, error) {
const (
device = "device"
statVersionPrefix = "statvers="
nfs3Type = "nfs"
nfs4Type = "nfs4"
)
var mounts []*Mount
s := bufio.NewScanner(r)
for s.Scan() {
// Only look for device entries in this function
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 || ss[0] != device {
continue
}
m, err := parseMount(ss)
if err != nil {
return nil, err
}
// Does this mount also possess statistics information?
if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type {
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
}
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
stats, err := parseMountStatsNFS(s, statVersion)
if err != nil {
return nil, err
}
m.Stats = stats
}
mounts = append(mounts, m)
}
return mounts, s.Err()
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss)
}
// Check for specific words appearing at specific indices to ensure
// the format is consistent with what we expect
format := []struct {
i int
s string
}{
{i: 0, s: "device"},
{i: 2, s: "mounted"},
{i: 3, s: "on"},
{i: 5, s: "with"},
{i: 6, s: "fstype"},
}
for _, f := range format {
if ss[f.i] != f.s {
return nil, fmt.Errorf("invalid device entry: %v", ss)
}
}
return &Mount{
Device: ss[1],
Mount: ss[4],
Type: ss[7],
}, nil
}
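
To make the fixed-index check above concrete, a small sketch using a hypothetical mountstats device entry (the device, mount point, and statvers values are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A device entry of the form the parser checks for:
	//   device [device] mounted on [mount] with fstype [type]
	line := "device fs.example.com:/export mounted on /mnt/nfs with fstype nfs statvers=1.1"
	ss := strings.Fields(line)

	// Indices 1, 4, and 7 carry the device, mount point, and fstype; a field
	// beyond index 7 (deviceEntryLen == 8) starts the statistics section.
	fmt.Println("device:", ss[1])
	fmt.Println("mount: ", ss[4])
	fmt.Println("fstype:", ss[7])
	fmt.Println("stats? ", len(ss) > 8)
}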
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
// related to NFS statistics.
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
// Field indicators for parsing specific types of data
const (
fieldAge = "age:"
fieldBytes = "bytes:"
fieldEvents = "events:"
fieldPerOpStats = "per-op"
fieldTransport = "xprt:"
)
stats := &MountStatsNFS{
StatVersion: statVersion,
}
for s.Scan() {
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 {
break
}
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
switch ss[0] {
case fieldAge:
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
if err != nil {
return nil, err
}
stats.Age = d
case fieldBytes:
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
return nil, err
}
stats.Bytes = *bstats
case fieldEvents:
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
return nil, err
}
stats.Events = *estats
case fieldTransport:
if len(ss) < 3 {
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
}
tstats, err := parseNFSTransportStats(ss[2:], statVersion)
if err != nil {
return nil, err
}
stats.Transport = *tstats
}
// When encountering "per-operation statistics", we must break this
// loop and parse them separately, so that parsing can stop before the next
// device entry is reached; that is why this check is a standalone 'if'
// statement rather than another switch case.
if ss[0] == fieldPerOpStats {
break
}
}
if err := s.Err(); err != nil {
return nil, err
}
// NFS per-operation stats appear last before the next device entry
perOpStats, err := parseNFSOperationStats(s)
if err != nil {
return nil, err
}
stats.Operations = perOpStats
return stats, nil
}
// parseNFSBytesStats parses a NFSBytesStats line using an input set of
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
}
ns := make([]int, 0, fieldBytesLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSBytesStats{
Read: ns[0],
Write: ns[1],
DirectRead: ns[2],
DirectWrite: ns[3],
ReadTotal: ns[4],
WriteTotal: ns[5],
ReadPages: ns[6],
WritePages: ns[7],
}, nil
}
// parseNFSEventsStats parses a NFSEventsStats line using an input set of
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
}
ns := make([]int, 0, fieldEventsLen)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSEventsStats{
InodeRevalidate: ns[0],
DnodeRevalidate: ns[1],
DataInvalidate: ns[2],
AttributeInvalidate: ns[3],
VFSOpen: ns[4],
VFSLookup: ns[5],
VFSAccess: ns[6],
VFSUpdatePage: ns[7],
VFSReadPage: ns[8],
VFSReadPages: ns[9],
VFSWritePage: ns[10],
VFSWritePages: ns[11],
VFSGetdents: ns[12],
VFSSetattr: ns[13],
VFSFlush: ns[14],
VFSFsync: ns[15],
VFSLock: ns[16],
VFSFileRelease: ns[17],
CongestionWait: ns[18],
Truncation: ns[19],
WriteExtension: ns[20],
SillyRename: ns[21],
ShortRead: ns[22],
ShortWrite: ns[23],
JukeboxDelay: ns[24],
PNFSRead: ns[25],
PNFSWrite: ns[26],
}, nil
}
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
// additional information about per-operation statistics until an empty
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
const (
// Number of expected fields in each per-operation statistics set
numFields = 9
)
var ops []NFSOperationStats
for s.Scan() {
ss := strings.Fields(string(s.Bytes()))
if len(ss) == 0 {
// A blank line ends the per-operation stats; break here so the top-level
// function can parse the next device entry.
break
}
if len(ss) != numFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
}
// Skip the string operation name; the remaining fields are integers.
ns := make([]int, 0, numFields-1)
for _, st := range ss[1:] {
n, err := strconv.Atoi(st)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
ops = append(ops, NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
Transmissions: ns[1],
MajorTimeouts: ns[2],
BytesSent: ns[3],
BytesReceived: ns[4],
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
})
}
return ops, s.Err()
}
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version.
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
switch statVersion {
case statVersion10:
if len(ss) != fieldTransport10Len {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
}
case statVersion11:
if len(ss) != fieldTransport11Len {
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
}
default:
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
}
// Allocate enough capacity for v1.1 stats, since the extra v1.1 fields simply
// remain zero when a v1.0 response is parsed.
ns := make([]int, 0, fieldTransport11Len)
for _, s := range ss {
n, err := strconv.Atoi(s)
if err != nil {
return nil, err
}
ns = append(ns, n)
}
return &NFSTransportStats{
Port: ns[0],
Bind: ns[1],
Connect: ns[2],
ConnectIdleTime: ns[3],
IdleTime: time.Duration(ns[4]) * time.Second,
Sends: ns[5],
Receives: ns[6],
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
}, nil
}

View File

@ -192,6 +192,18 @@ func (p Proc) FileDescriptorsLen() (int, error) {
return len(fds), nil
}
// MountStats retrieves statistics and configuration for mount points in a
// process's namespace.
func (p Proc) MountStats() ([]*Mount, error) {
f, err := os.Open(p.path("mountstats"))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountStats(f)
}
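
A hedged usage sketch of the new method, assuming procfs.Self() for the current process and the exported types above; in this version only NFS mounts carry statistics, so other mounts have a nil Stats field:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// The type assertion only succeeds for NFS mounts with parsed stats.
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Printf("%s on %s: %d bytes read, %d bytes written\n",
				m.Device, m.Mount, nfs.Bytes.ReadTotal, nfs.Bytes.WriteTotal)
		}
	}
}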
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {

View File

@ -126,6 +126,8 @@ type Drawer struct {
// vertical line? Should DrawString return the number of runes drawn?
// DrawBytes draws s at the dot and advances the dot's location.
//
// It is equivalent to DrawString(string(s)) but may be more efficient.
func (d *Drawer) DrawBytes(s []byte) {
prevC := rune(-1)
for len(s) > 0 {
@ -168,6 +170,8 @@ func (d *Drawer) DrawString(s string) {
}
// MeasureBytes returns how far dot would advance by drawing s.
//
// It is equivalent to MeasureString(string(s)) but may be more efficient.
func (d *Drawer) MeasureBytes(s []byte) (advance fixed.Int26_6) {
return MeasureBytes(d.Face, s)
}
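
A small usage sketch of the byte-slice variants documented above, assuming the fixed-size face from golang.org/x/image/font/basicfont and a made-up message:

package main

import (
	"fmt"
	"image"
	"image/color"

	"golang.org/x/image/font"
	"golang.org/x/image/font/basicfont"
	"golang.org/x/image/math/fixed"
)

func main() {
	dst := image.NewRGBA(image.Rect(0, 0, 200, 40))
	d := &font.Drawer{
		Dst:  dst,
		Src:  image.NewUniform(color.Black),
		Face: basicfont.Face7x13,
		Dot:  fixed.P(10, 20),
	}

	msg := []byte("hello, world")
	// MeasureBytes avoids the []byte -> string conversion that
	// MeasureString(string(msg)) would require.
	fmt.Println("advance:", d.MeasureBytes(msg))
	d.DrawBytes(msg) // advances d.Dot as it draws
}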
@ -178,6 +182,8 @@ func (d *Drawer) MeasureString(s string) (advance fixed.Int26_6) {
}
// MeasureBytes returns how far dot would advance by drawing s with f.
//
// It is equivalent to MeasureString(string(s)) but may be more efficient.
func MeasureBytes(f Face, s []byte) (advance fixed.Int26_6) {
prevC := rune(-1)
for len(s) > 0 {