mirror of https://github.com/knative/pkg.git

New observability package (#3188)

* include observability package
* go mod changes
* commit vendor
* PR feedback

This commit is contained in:
parent 4ec554b5af
commit 5e2512c712

go.mod (33 changed lines)
@@ -18,9 +18,20 @@ require (
 	github.com/hashicorp/golang-lru v1.0.2
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/openzipkin/zipkin-go v0.4.3
+	github.com/prometheus/client_golang v1.22.0
 	github.com/spf13/pflag v1.0.6
 	github.com/tsenart/vegeta/v12 v12.12.0
 	go.opencensus.io v0.24.0
+	go.opentelemetry.io/otel v1.37.0
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0
+	go.opentelemetry.io/otel/exporters/prometheus v0.59.0
+	go.opentelemetry.io/otel/metric v1.37.0
+	go.opentelemetry.io/otel/sdk v1.37.0
+	go.opentelemetry.io/otel/sdk/metric v1.37.0
+	go.opentelemetry.io/otel/trace v1.37.0
 	go.uber.org/automaxprocs v1.6.0
 	go.uber.org/zap v1.27.0
 	golang.org/x/net v0.41.0
@@ -45,13 +56,15 @@ require (
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.2 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
 	github.com/evanphx/json-patch v5.9.0+incompatible // indirect
 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
@@ -59,7 +72,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.9 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
 	github.com/influxdata/tdigest v0.0.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -68,25 +81,27 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.22.0 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.65.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/prometheus/statsd_exporter v0.22.7 // indirect
 	github.com/rs/dnscache v0.0.0-20230804202142-fc85eb664529 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.yaml.in/yaml/v2 v2.4.2 // indirect
 	golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac // indirect
 	golang.org/x/mod v0.25.0 // indirect
-	golang.org/x/oauth2 v0.28.0 // indirect
+	golang.org/x/oauth2 v0.30.0 // indirect
 	golang.org/x/sys v0.33.0 // indirect
 	golang.org/x/term v0.32.0 // indirect
 	golang.org/x/text v0.26.0 // indirect
 	golang.org/x/time v0.10.0 // indirect
 	google.golang.org/api v0.183.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
go.sum (69 changed lines)

@@ -58,6 +58,8 @@ github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHf
 github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
 github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e h1:mWOqoK5jV13ChKf/aF3plwQ96laasTJgZi4f1aSOu+M=
 github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
+github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
+github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
@@ -108,8 +110,9 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
@@ -202,8 +205,8 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
 github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
@@ -291,24 +294,24 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
+github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
 github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0=
 github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -360,16 +363,30 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
 go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
-go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
-go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
-go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
-go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
-go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
-go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
-go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
-go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
-go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 h1:zG8GlgXCJQd5BU98C0hZnBbElszTmUgCNCfYneaDL0A=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0/go.mod h1:hOfBCz8kv/wuq73Mx2H2QnWokh/kHZxkh6SNF2bdKtw=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0 h1:9PgnL3QNlj10uGxExowIDIZu66aVBwWhXmbOp1pa6RA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0/go.mod h1:0ineDcLELf6JmKfuo0wvvhAVMuxWFYvkTin2iV4ydPQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 h1:bDMKF3RUSxshZ5OjOTi8rsHGaPKsAt76FaqgvIUySLc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0/go.mod h1:dDT67G/IkA46Mr2l9Uj7HsQVwsjASyV9SjGofsiUZDA=
+go.opentelemetry.io/otel/exporters/prometheus v0.59.0 h1:HHf+wKS6o5++XZhS98wvILrLVgHxjA/AMjqHKes+uzo=
+go.opentelemetry.io/otel/exporters/prometheus v0.59.0/go.mod h1:R8GpRXTZrqvXHDEGVH5bF6+JqAZcK8PjJcZ5nGhEWiE=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
+go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
+go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
+go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
 go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
@@ -470,8 +487,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
-golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -657,10 +674,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM=
-google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -0,0 +1,67 @@ new file (package attributekey)

/*
Copyright 2025 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package attributekey

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

type ValueType interface {
	string | bool | int64 | int | float64 |
		[]string | []bool | []int64 | []int | []float64
}

type (
	Type[T ValueType] string

	String  = Type[string]
	Bool    = Type[bool]
	Int     = Type[int]
	Int64   = Type[int64]
	Float64 = Type[float64]
)

func (key Type[T]) With(val T) attribute.KeyValue {
	k := string(key)

	switch v := any(val).(type) {
	case string:
		return attribute.String(k, v)
	case bool:
		return attribute.Bool(k, v)
	case int64:
		return attribute.Int64(k, v)
	case int:
		return attribute.Int(k, v)
	case float64:
		return attribute.Float64(k, v)
	case []string:
		return attribute.StringSlice(k, v)
	case []bool:
		return attribute.BoolSlice(k, v)
	case []int64:
		return attribute.Int64Slice(k, v)
	case []int:
		return attribute.IntSlice(k, v)
	case []float64:
		return attribute.Float64Slice(k, v)
	default:
		// note - this can't happen due to type constraints
		panic(fmt.Sprintf("unsupported attribute type: %T", v))
	}
}
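A rough usage sketch, not part of this commit: a package would declare its attribute keys once as typed constants and get compile-time checking of the value type at every call-site. The key names, the counter, and the import path knative.dev/pkg/observability/attributekey are assumptions (only the package name appears in the diff).

package example

import (
	"context"

	"go.opentelemetry.io/otel/metric"

	// Assumed import path; only the package name is visible in this diff.
	"knative.dev/pkg/observability/attributekey"
)

// Hypothetical keys, for illustration only.
const (
	ResourceName = attributekey.String("kn.example.resource.name")
	RetryCount   = attributekey.Int("kn.example.retry.count")
)

// record adds one to the counter with strongly typed attributes.
// ResourceName.With(123) would not compile, which is what Type[T] buys you.
func record(ctx context.Context, counter metric.Int64Counter, name string, retries int) {
	counter.Add(ctx, 1, metric.WithAttributes(
		ResourceName.With(name),
		RetryCount.With(retries),
	))
}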
@@ -0,0 +1,86 @@ new file (package attributekey)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package attributekey

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"go.opentelemetry.io/otel/attribute"
)

func TestWith(t *testing.T) {
	cases := []struct {
		name      string
		got       attribute.KeyValue
		want      attribute.KeyValue
		wantPanic bool
	}{{
		name: "string",
		got:  String("key").With("val"),
		want: attribute.String("key", "val"),
	}, {
		name: "bool",
		got:  Bool("key").With(true),
		want: attribute.Bool("key", true),
	}, {
		name: "int",
		got:  Int("key").With(1),
		want: attribute.Int("key", 1),
	}, {
		name: "int64",
		got:  Int64("key").With(1),
		want: attribute.Int64("key", 1),
	}, {
		name: "float64",
		got:  Float64("key").With(1),
		want: attribute.Float64("key", 1),
	}, {
		name: "string slice",
		got:  Type[[]string]("key").With([]string{"1"}),
		want: attribute.StringSlice("key", []string{"1"}),
	}, {
		name: "bool slice",
		got:  Type[[]bool]("key").With([]bool{true}),
		want: attribute.BoolSlice("key", []bool{true}),
	}, {
		name: "float64 slice",
		got:  Type[[]float64]("key").With([]float64{1}),
		want: attribute.Float64Slice("key", []float64{1}),
	}, {
		name: "int64 slice",
		got:  Type[[]int64]("key").With([]int64{1}),
		want: attribute.Int64Slice("key", []int64{1}),
	}, {
		name: "int slice",
		got:  Type[[]int]("key").With([]int{1}),
		want: attribute.IntSlice("key", []int{1}),
	}}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if diff := diff(tc.want, tc.got); diff != "" {
				t.Errorf("mismatch (-want +got):\n%s", diff)
			}
		})
	}
}

func diff(want, got any) string {
	return cmp.Diff(want, got, cmpopts.EquateComparable(attribute.KeyValue{}))
}
@@ -0,0 +1,79 @@ new file (package observability)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package observability

import (
	"context"

	"knative.dev/pkg/observability/metrics"
	"knative.dev/pkg/observability/runtime"
	"knative.dev/pkg/observability/tracing"
)

type (
	TracingConfig = tracing.Config
	MetricsConfig = metrics.Config
	RuntimeConfig = runtime.Config
)

type Config struct {
	Tracing TracingConfig
	Metrics MetricsConfig
	Runtime RuntimeConfig
}

func DefaultConfig() *Config {
	return &Config{
		Tracing: tracing.DefaultConfig(),
		Metrics: metrics.DefaultConfig(),
		Runtime: runtime.DefaultConfig(),
	}
}

func NewFromMap(m map[string]string) (*Config, error) {
	var err error
	c := DefaultConfig()

	if c.Tracing, err = tracing.NewFromMap(m); err != nil {
		return nil, err
	}

	if c.Metrics, err = metrics.NewFromMap(m); err != nil {
		return nil, err
	}
	if c.Runtime, err = runtime.NewFromMap(m); err != nil {
		return nil, err
	}

	return c, nil
}

type cfgKey struct{}

// WithConfig associates an observability configuration with the context.
func WithConfig(ctx context.Context, cfg *Config) context.Context {
	return context.WithValue(ctx, cfgKey{}, cfg)
}

// GetConfig gets the observability config from the provided context.
func GetConfig(ctx context.Context) *Config {
	untyped := ctx.Value(cfgKey{})
	if untyped == nil {
		return nil
	}
	return untyped.(*Config)
}
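A minimal sketch of how these pieces fit together, reusing the same flat keys the tests in this commit exercise; the wiring below is illustrative, not code from the commit.

package main

import (
	"context"
	"fmt"
	"log"

	"knative.dev/pkg/observability"
)

func main() {
	// The same flat keys the tests in this commit use.
	cfg, err := observability.NewFromMap(map[string]string{
		"tracing-protocol":  "grpc",
		"tracing-endpoint":  "https://example.com",
		"metrics-protocol":  "grpc",
		"metrics-endpoint":  "https://example.com",
		"runtime-profiling": "enabled",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Stash the config on a context so downstream code can read it back.
	ctx := observability.WithConfig(context.Background(), cfg)
	fmt.Println(observability.GetConfig(ctx).Metrics.Protocol) // "grpc"
}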
@@ -0,0 +1,86 @@ new file (package observability)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package observability

import (
	"context"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"knative.dev/pkg/observability/metrics"
	"knative.dev/pkg/observability/runtime"
	"knative.dev/pkg/observability/tracing"
)

func TestConfigInContext(t *testing.T) {
	want := DefaultConfig()
	ctx := WithConfig(context.Background(), want)
	got := GetConfig(ctx)

	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("mismatch (-want +got):\n%s", diff)
	}
}

func TestNoConfigInContext(t *testing.T) {
	got := GetConfig(context.Background())
	want := (*Config)(nil)

	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("mismatch (-want +got):\n%s", diff)
	}
}

func TestNewFromMap(t *testing.T) {
	cm := map[string]string{
		"tracing-protocol":        "grpc",
		"tracing-endpoint":        "https://example.com",
		"tracing-sampling-rate":   "1",
		"runtime-profiling":       "enabled",
		"runtime-export-interval": "15s",
		"metrics-protocol":        "grpc",
		"metrics-endpoint":        "https://example.com",
		"metrics-export-interval": "10s",
	}

	got, err := NewFromMap(cm)
	if err != nil {
		t.Error("unexpected error: ", err)
	}

	want := &Config{
		Tracing: tracing.Config{
			Protocol:     "grpc",
			Endpoint:     "https://example.com",
			SamplingRate: 1,
		},
		Runtime: runtime.Config{
			Profiling:      "enabled",
			ExportInterval: 15 * time.Second,
		},
		Metrics: metrics.Config{
			Protocol:       "grpc",
			Endpoint:       "https://example.com",
			ExportInterval: 10 * time.Second,
		},
	}

	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("mismatch (-want +got):\n%s", diff)
	}
}
@@ -0,0 +1,39 @@ new file (package configmap)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package configmap

import (
	"cmp"
	"os"

	corev1 "k8s.io/api/core/v1"
	"knative.dev/pkg/observability"
)

const (
	// The env var name for config-observability
	configMapNameEnv = "CONFIG_OBSERVABILITY_NAME"
	DefaultName      = "config-observability"
)

func Name() string {
	return cmp.Or(os.Getenv(configMapNameEnv), DefaultName)
}

func Parse(c *corev1.ConfigMap) (*observability.Config, error) {
	return observability.NewFromMap(c.Data)
}
@@ -0,0 +1,83 @@ new file (package configmap)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package configmap

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"

	corev1 "k8s.io/api/core/v1"

	"knative.dev/pkg/observability"
	"knative.dev/pkg/observability/metrics"
	"knative.dev/pkg/observability/runtime"
	"knative.dev/pkg/observability/tracing"
)

func TestName(t *testing.T) {
	if diff := cmp.Diff(DefaultName, Name()); diff != "" {
		t.Error("unexpected name (-want +got): ", diff)
	}

	t.Setenv(configMapNameEnv, "my-config")
	if diff := cmp.Diff("my-config", Name()); diff != "" {
		t.Error("unexpected name (-want +got): ", diff)
	}
}

func TestParse(t *testing.T) {
	cm := &corev1.ConfigMap{
		Data: map[string]string{
			"tracing-protocol":        "grpc",
			"tracing-endpoint":        "https://example.com",
			"tracing-sampling-rate":   "1",
			"runtime-profiling":       "enabled",
			"runtime-export-interval": "15s",
			"metrics-protocol":        "grpc",
			"metrics-endpoint":        "https://example.com",
			"metrics-export-interval": "10s",
		},
	}

	got, err := Parse(cm)
	if err != nil {
		t.Error("unexpected error: ", err)
	}

	want := &observability.Config{
		Tracing: tracing.Config{
			Protocol:     "grpc",
			Endpoint:     "https://example.com",
			SamplingRate: 1,
		},
		Runtime: runtime.Config{
			Profiling:      "enabled",
			ExportInterval: 15 * time.Second,
		},
		Metrics: metrics.Config{
			Protocol:       "grpc",
			Endpoint:       "https://example.com",
			ExportInterval: 10 * time.Second,
		},
	}

	if diff := cmp.Diff(want, got); diff != "" {
		t.Errorf("mismatch (-want +got):\n%s", diff)
	}
}
@@ -0,0 +1,92 @@ new file (package metrics)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package metrics

import (
	"fmt"
	"time"

	configmap "knative.dev/pkg/configmap/parser"
)

const (
	ProtocolGRPC         = "grpc"
	ProtocolHTTPProtobuf = "http/protobuf"
	ProtocolPrometheus   = "prometheus"
	ProtocolNone         = "none"
)

// Config provides a unified observability configuration which can be used to
// manage Knative observability behavior. Typically, this is extracted from a
// Kubernetes ConfigMap during application startup, and accessed via the
// GetConfig() method.
type Config struct {
	Protocol string
	Endpoint string

	ExportInterval time.Duration
}

func (c *Config) Validate() error {
	switch c.Protocol {
	case ProtocolGRPC, ProtocolHTTPProtobuf:
		if c.Endpoint == "" {
			return fmt.Errorf("endpoint should be set when protocol is %q", c.Protocol)
		}
	case ProtocolNone:
		if c.Endpoint != "" {
			return fmt.Errorf("endpoint should not be set when protocol is %q", c.Protocol)
		}
	case ProtocolPrometheus:
		// Endpoint is not required, but can be used to indicate listen port
	default:
		return fmt.Errorf("unsupported protocol %q", c.Protocol)
	}

	if c.ExportInterval < 0 {
		return fmt.Errorf("export interval %q should be greater than zero", c.ExportInterval)
	}
	return nil
}

// DefaultConfig returns a configuration with default values set.
func DefaultConfig() Config {
	return Config{
		Protocol: ProtocolNone,
	}
}

// NewFromMap unpacks flat configuration values from a ConfigMap into
// the configuration used by different observability modules.
func NewFromMap(m map[string]string) (Config, error) {
	return NewFromMapWithPrefix("", m)
}

func NewFromMapWithPrefix(prefix string, m map[string]string) (Config, error) {
	c := DefaultConfig()

	err := configmap.Parse(m,
		configmap.As(prefix+"metrics-protocol", &c.Protocol),
		configmap.As(prefix+"metrics-endpoint", &c.Endpoint),
		configmap.As(prefix+"metrics-export-interval", &c.ExportInterval),
	)
	if err != nil {
		return c, err
	}

	return c, c.Validate()
}
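This excerpt does not include the code that turns a metrics.Config into an OpenTelemetry MeterProvider, so the following is only a hedged sketch of one way the Protocol switch could be consumed. The helper name newMeterProvider and all of the wiring are assumptions; only metrics.Config, its fields, and the Protocol* constants come from the commit (the sketch also skips http/protobuf).

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	"go.opentelemetry.io/otel/exporters/prometheus"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"

	"knative.dev/pkg/observability/metrics"
)

// newMeterProvider is a hypothetical helper, not code from this commit.
func newMeterProvider(ctx context.Context, cfg metrics.Config) (*sdkmetric.MeterProvider, error) {
	var reader sdkmetric.Reader

	switch cfg.Protocol {
	case metrics.ProtocolGRPC:
		exp, err := otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithEndpointURL(cfg.Endpoint))
		if err != nil {
			return nil, err
		}
		opts := []sdkmetric.PeriodicReaderOption{}
		if cfg.ExportInterval > 0 {
			opts = append(opts, sdkmetric.WithInterval(cfg.ExportInterval))
		}
		reader = sdkmetric.NewPeriodicReader(exp, opts...)
	case metrics.ProtocolPrometheus:
		// The Prometheus exporter is itself a pull-based Reader.
		exp, err := prometheus.New()
		if err != nil {
			return nil, err
		}
		reader = exp
	default:
		// ProtocolNone (and anything unhandled here): no readers, measurements are dropped.
		return sdkmetric.NewMeterProvider(), nil
	}

	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader)), nil
}

func main() {
	mp, err := newMeterProvider(context.Background(), metrics.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer mp.Shutdown(context.Background())
}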
@@ -0,0 +1,88 @@ new file (package metrics)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package metrics

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
)

func TestNewFromMap(t *testing.T) {
	got, err := NewFromMap(nil)
	want := DefaultConfig()

	if err != nil {
		t.Error("unexpected error:", err)
	}

	if diff := cmp.Diff(want, got); diff != "" {
		t.Error("unexpected diff (-want +got): ", diff)
	}
}

func TestNewFromMapBadInput(t *testing.T) {
	cases := []struct {
		name string
		m    map[string]string
	}{{
		name: "unexpected endpoint set",
		m:    map[string]string{"metrics-endpoint": "https://blah.example.com"},
	}, {
		name: "missing endpoint",
		m:    map[string]string{"metrics-protocol": "grpc"},
	}, {
		name: "unsupported protocol",
		m:    map[string]string{"metrics-protocol": "bad-protocol"},
	}, {
		name: "bad export interval - negative number",
		m:    map[string]string{"metrics-export-interval": "-1s"},
	}, {
		name: "bad export interval - not an integer",
		m:    map[string]string{"metrics-export-interval": "bad-interval"},
	}}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if _, err := NewFromMap(tc.m); err == nil {
				t.Error("expected an error")
			}
		})
	}
}

func TestNewFromMapWithPrefix(t *testing.T) {
	got, err := NewFromMapWithPrefix("request-", map[string]string{
		"request-metrics-protocol":        ProtocolGRPC,
		"request-metrics-endpoint":        "https://blah.example.com",
		"request-metrics-export-interval": "15s",
	})
	if err != nil {
		t.Error("unexpected error", err)
	}

	want := Config{
		Protocol:       ProtocolGRPC,
		Endpoint:       "https://blah.example.com",
		ExportInterval: 15 * time.Second,
	}

	if diff := cmp.Diff(want, got); diff != "" {
		t.Error("unexpected diff (-want +got): ", diff)
	}
}
@@ -0,0 +1,33 @@ new file (package globalviews)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

// Package globalviews lets OpenTelemetry views be registered globally.
//
// OpenTelemetry views allow you to override the default behaviour of the
// SDK: a view defines how data should be collected for certain instruments.
//
// OpenCensus allowed views to be registered globally to simplify setup. In
// contrast, OpenTelemetry requires views to be provided when creating a
// [MeterProvider] using the [WithView] option, which was done to allow
// multiple metrics pipelines.
//
// To offer a similar UX, the globalviews package provides a way to register
// OTel views globally. This allows Knative maintainers to author packages
// with metrics and include views that can be consumed by packages such as
// sharedmain.
//
// [MeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric#NewMeterProvider
// [WithView]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric#WithView
package globalviews
@@ -0,0 +1,41 @@ new file (package globalviews)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package globalviews

import (
	"maps"
	"slices"

	"go.opentelemetry.io/otel/sdk/metric"
)

var globalViews map[string][]metric.View = make(map[string][]metric.View)

// Register should be called during package init
func Register(pkg string, v ...metric.View) {
	views := globalViews[pkg]
	globalViews[pkg] = append(views, v...)
}

func GetPackageViews(pkg string) []metric.View {
	return globalViews[pkg]
}

func GetAllViews() []metric.View {
	list := slices.Collect(maps.Values(globalViews))
	return slices.Concat(list...)
}
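A sketch of the intended flow described in the package documentation above: a library registers its views at init time, and the binary that builds the single MeterProvider passes every registered view via WithView. The package identifier "knative.dev/example/http" and the consuming main are hypothetical; the view itself mirrors the one used in the test below.

package main

import (
	"context"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"

	"knative.dev/pkg/observability/metrics/globalviews"
)

// A library package would typically register its views from an init func,
// keyed by its own (hypothetical) package identifier.
func init() {
	globalviews.Register("knative.dev/example/http", sdkmetric.NewView(
		sdkmetric.Instrument{
			Name:  "latency",
			Scope: instrumentation.Scope{Name: "http"},
		},
		sdkmetric.Stream{
			Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
				MaxSize:  160,
				MaxScale: 20,
			},
		},
	))
}

func main() {
	// The binary then hands every registered view to the one MeterProvider it builds.
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithView(globalviews.GetAllViews()...),
	)
	defer mp.Shutdown(context.Background())
}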
@@ -0,0 +1,87 @@ new file (package globalviews, external test)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package globalviews_test

import (
	"testing"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/metric"
	"knative.dev/pkg/observability/metrics/globalviews"
)

var testView = metric.NewView(
	metric.Instrument{
		Name:  "latency",
		Scope: instrumentation.Scope{Name: "http"},
	},
	metric.Stream{
		Aggregation: metric.AggregationBase2ExponentialHistogram{
			MaxSize:  160,
			MaxScale: 20,
		},
	},
)

var secondView = metric.NewView(
	metric.Instrument{
		Name:  "latency",
		Scope: instrumentation.Scope{Name: "http"},
	},
	metric.Stream{
		Aggregation: metric.AggregationBase2ExponentialHistogram{
			MaxSize:  160,
			MaxScale: 20,
		},
	},
)

func TestRegistration(t *testing.T) {
	testPackage := "com.example.package"

	if len(globalviews.GetAllViews()) != 0 {
		t.Fatal("expected zero views")
	}

	globalviews.Register(testPackage, testView)

	if count := len(globalviews.GetAllViews()); count != 1 {
		t.Fatalf("expected global view count to be 1 got %d", count)
	}

	if count := len(globalviews.GetPackageViews(testPackage)); count != 1 {
		t.Fatalf("expected a single view for %q got %d", testPackage, count)
	}

	if count := len(globalviews.GetPackageViews("com.example.second.package")); count != 0 {
		t.Fatalf("expected no views for 'second' package got %d", count)
	}

	globalviews.Register(testPackage, secondView)

	if count := len(globalviews.GetAllViews()); count != 2 {
		t.Fatalf("expected global view count to be 2 got %d", count)
	}

	if count := len(globalviews.GetPackageViews(testPackage)); count != 2 {
		t.Fatalf("expected two views for %q got %d", testPackage, count)
	}

	if count := len(globalviews.GetPackageViews("com.example.second.package")); count != 0 {
		t.Fatalf("expected no views for 'second' package got %d", count)
	}
}
@@ -0,0 +1,136 @@ new file (package metricstest)

/*
Copyright 2025 The Knative Authors
Licensed under the Apache License, Version 2.0 (same header as in the first file above).
*/

package metricstest

import (
	"context"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"

	"k8s.io/apimachinery/pkg/util/sets"
)

type metricReader interface {
	Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
}

type testingT interface {
	Error(args ...any)
	Errorf(format string, args ...any)
	Fatalf(format string, args ...any)
	Fatal(args ...any)
	Helper()
	Failed() bool
}

type AssertFunc func(testingT, *metricdata.ResourceMetrics)

func AssertMetrics(t testingT, r metricReader, assertFns ...AssertFunc) {
	t.Helper()

	var rm metricdata.ResourceMetrics
	r.Collect(context.Background(), &rm)

	for _, assertFn := range assertFns {
		assertFn(t, &rm)
	}
}

func HasAttributes(
	scopePrefix string,
	metricPrefix string,
	want ...attribute.KeyValue,
) AssertFunc {
	return func(t testingT, rm *metricdata.ResourceMetrics) {
		t.Helper()

		assertCalled := false

		if len(want) == 0 {
			return
		}

		for _, sm := range rm.ScopeMetrics {
			if !strings.HasPrefix(sm.Scope.Name, scopePrefix) {
				continue
			}

			for _, metric := range sm.Metrics {
				if !strings.HasPrefix(metric.Name, metricPrefix) {
					continue
				}

				assertCalled = true

				mt := t.(metricdatatest.TestingT)
				switch data := metric.Data.(type) {
				case metricdata.Sum[int64]:
					metricdatatest.AssertHasAttributes(mt, data, want...)
				case metricdata.Sum[float64]:
					metricdatatest.AssertHasAttributes(mt, data, want...)
				case metricdata.Histogram[int64]:
					metricdatatest.AssertHasAttributes(mt, data, want...)
				case metricdata.Histogram[float64]:
					metricdatatest.AssertHasAttributes(mt, data, want...)
				case metricdata.ExponentialHistogram[int64]:
					metricdatatest.AssertHasAttributes(mt, data, want...)
				case metricdata.ExponentialHistogram[float64]:
					metricdatatest.AssertHasAttributes(mt, data, want...)
				case metricdata.Gauge[int64]:
					metricdatatest.AssertHasAttributes(mt, data, want...)
				case metricdata.Gauge[float64]:
					metricdatatest.AssertHasAttributes(mt, data, want...)
				default:
					t.Fatalf("unsupported metric data type for metric %q: %T", metric.Name, data)
				}
			}
		}
		if !assertCalled {
			t.Error("expected attributes but scope and metric prefix didn't match any results")
		}
	}
}

func MetricsPresent(scopeName string, names ...string) AssertFunc {
	return func(t testingT, rm *metricdata.ResourceMetrics) {
		t.Helper()

		want := sets.New(names...)
		got := sets.New[string]()

		for _, sm := range rm.ScopeMetrics {
			if sm.Scope.Name != scopeName {
				continue
			}
			for _, metric := range sm.Metrics {
				if !want.Has(metric.Name) {
					t.Fatal("unexpected metric", metric.Name)
				}

				got.Insert(metric.Name)
			}
		}

		diff := want.Difference(got)
		if len(diff) > 0 {
			t.Fatal("expected metrics didn't appear", diff.UnsortedList())
		}
	}
}
@ -0,0 +1,200 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package metricstest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAssertMetricsPresent(t *testing.T) {
|
||||||
|
ft := &fakeT{}
|
||||||
|
|
||||||
|
m := &metrics{
|
||||||
|
ScopeMetrics: []metricdata.ScopeMetrics{{
|
||||||
|
Scope: instrumentation.Scope{
|
||||||
|
Name: "first",
|
||||||
|
},
|
||||||
|
Metrics: []metricdata.Metrics{{
|
||||||
|
Name: "a",
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
AssertMetrics(ft, m, MetricsPresent("first", "a"))
|
||||||
|
|
||||||
|
if ft.Failed() {
|
||||||
|
t.Error("unexpected failure")
|
||||||
|
}
|
||||||
|
|
||||||
|
ft = &fakeT{}
|
||||||
|
|
||||||
|
AssertMetrics(ft, m, MetricsPresent("second", "a"))
|
||||||
|
|
||||||
|
if !ft.Failed() {
|
||||||
|
t.Error("should have failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
AssertMetrics(ft, m, MetricsPresent("first", "b"))
|
||||||
|
|
||||||
|
if !ft.Failed() {
|
||||||
|
t.Error("should have failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasAttributes(t *testing.T) {
|
||||||
|
m := &metrics{
|
||||||
|
ScopeMetrics: []metricdata.ScopeMetrics{{
|
||||||
|
Scope: instrumentation.Scope{Name: "first-scope"},
|
||||||
|
Metrics: []metricdata.Metrics{{
|
||||||
|
Name: "a-metric",
|
||||||
|
Data: metricdata.Sum[int64]{
|
||||||
|
DataPoints: []metricdata.DataPoint[int64]{{
|
||||||
|
Attributes: attribute.NewSet(
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}, {
|
||||||
|
Scope: instrumentation.Scope{Name: "second-scope"},
|
||||||
|
Metrics: []metricdata.Metrics{{
|
||||||
|
Name: "b-metric",
|
||||||
|
Data: metricdata.Sum[int64]{
|
||||||
|
DataPoints: []metricdata.DataPoint[int64]{{
|
||||||
|
Attributes: attribute.NewSet(
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Name: "c-metric",
|
||||||
|
Data: metricdata.Histogram[int64]{
|
||||||
|
DataPoints: []metricdata.HistogramDataPoint[int64]{{
|
||||||
|
Attributes: attribute.NewSet(
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
Name: "d-metric",
|
||||||
|
Data: metricdata.Histogram[float64]{
|
||||||
|
DataPoints: []metricdata.HistogramDataPoint[float64]{{
|
||||||
|
Attributes: attribute.NewSet(
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("no attributes", func(t *testing.T) {
|
||||||
|
ft := &fakeT{}
|
||||||
|
|
||||||
|
AssertMetrics(ft, m, HasAttributes("first", "a"))
|
||||||
|
|
||||||
|
// no metric prefix
|
||||||
|
AssertMetrics(ft, m, HasAttributes("first", ""))
|
||||||
|
|
||||||
|
// no scope prefix
|
||||||
|
AssertMetrics(ft, m, HasAttributes("", "a",
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
))
|
||||||
|
|
||||||
|
// no scope and metric prefix
|
||||||
|
AssertMetrics(ft, m, HasAttributes("", ""))
|
||||||
|
|
||||||
|
if ft.Failed() {
|
||||||
|
t.Error("unexpected failure")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("has attributes", func(t *testing.T) {
|
||||||
|
ft := &fakeT{}
|
||||||
|
|
||||||
|
AssertMetrics(ft, m, HasAttributes("first", "a",
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
))
|
||||||
|
|
||||||
|
// no metric prefix
|
||||||
|
AssertMetrics(ft, m, HasAttributes("first", "",
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
))
|
||||||
|
|
||||||
|
// no scope prefix
|
||||||
|
AssertMetrics(ft, m, HasAttributes("", "a",
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
))
|
||||||
|
|
||||||
|
// no scope and metric prefix
|
||||||
|
AssertMetrics(ft, m, HasAttributes("", "",
|
||||||
|
attribute.String("k", "v"),
|
||||||
|
))
|
||||||
|
|
||||||
|
if ft.Failed() {
|
||||||
|
t.Error("unexpected failure")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no match", func(t *testing.T) {
|
||||||
|
ft := &fakeT{}
|
||||||
|
|
||||||
|
AssertMetrics(ft, m,
|
||||||
|
HasAttributes("", "", attribute.String("k", "a")))
|
||||||
|
|
||||||
|
if !ft.Failed() {
|
||||||
|
t.Error("expected failure")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no metrics", func(t *testing.T) {
|
||||||
|
ft := &fakeT{}
|
||||||
|
m := &metrics{}
|
||||||
|
|
||||||
|
AssertMetrics(ft, m,
|
||||||
|
HasAttributes("first", "a", attribute.String("k", "a")))
|
||||||
|
|
||||||
|
if !ft.Failed() {
|
||||||
|
t.Error("expected failure")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type metrics metricdata.ResourceMetrics
|
||||||
|
|
||||||
|
func (f *metrics) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
|
||||||
|
*rm = (metricdata.ResourceMetrics)(*f)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type fakeT struct {
|
||||||
|
failed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *fakeT) Failed() bool {
|
||||||
|
return t.failed
|
||||||
|
}
|
||||||
|
func (t *fakeT) Error(args ...any) { t.failed = true }
|
||||||
|
func (t *fakeT) Errorf(format string, args ...any) { t.failed = true }
|
||||||
|
func (t *fakeT) Fatal(args ...any) { t.failed = true }
|
||||||
|
func (t *fakeT) Fatalf(format string, args ...any) { t.failed = true }
|
||||||
|
func (t *fakeT) Helper() {}
|
||||||
|
|
@ -0,0 +1,61 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package metricstest provides test helpers to assert OTel metrics
|
||||||
|
are recorded properly.
|
||||||
|
|
||||||
|
When setting up your package for unit tests it is recommended that
|
||||||
|
your instrumentation can be reset between tests. This can be accomplished
|
||||||
|
using the following structure.
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
resetPackageMetrics()
|
||||||
|
}
|
||||||
|
|
||||||
|
var instrument metric.Int64Counter
|
||||||
|
|
||||||
|
func resetPackageMetrics() {
|
||||||
|
// Reset the meter
|
||||||
|
var meter = otel.Meter("com.some.scope")
|
||||||
|
|
||||||
|
// Reset the instrumentation
|
||||||
|
instrument = meter.Int64Counter("some.metric")
|
||||||
|
}
|
||||||
|
|
||||||
|
Then in your tests you can setup the metrics pipeline as follows:
|
||||||
|
|
||||||
|
func TestMetricsRecorded(t *testing.T) {
|
||||||
|
reader := metric.NewManualReader()
|
||||||
|
provider := metric.NewMeterProvider(metric.WithReader(reader))
|
||||||
|
|
||||||
|
// Set the global provider
|
||||||
|
otel.SetMeterProvider(provider)
|
||||||
|
|
||||||
|
// This rebuilds instruments with the new provider and directs
|
||||||
|
// the metrics to be recorded in the manual reader declared above.
|
||||||
|
resetPackageMetrics()
|
||||||
|
|
||||||
|
doSomething()
|
||||||
|
|
||||||
|
|
||||||
|
metricstest.AssertMetrics(t, reader,
|
||||||
|
metricstest.MetricPresent(...),
|
||||||
|
metricstest.HasAttributes(...),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
package metricstest
|
||||||
|
|
@ -0,0 +1,124 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultPrometheusPort = "9090"
|
||||||
|
defaultPrometheusReportingPeriod = 5
|
||||||
|
maxPrometheusPort = 65535
|
||||||
|
minPrometheusPort = 1024
|
||||||
|
defaultPrometheusHost = "" // IPv4 and IPv6
|
||||||
|
prometheusPortEnvName = "METRICS_PROMETHEUS_PORT"
|
||||||
|
prometheusHostEnvName = "METRICS_PROMETHEUS_HOST"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ServerOption func(*options)
|
||||||
|
|
||||||
|
type Server struct {
|
||||||
|
http *http.Server
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewServer(opts ...ServerOption) (*Server, error) {
|
||||||
|
o := options{
|
||||||
|
host: defaultPrometheusHost,
|
||||||
|
port: defaultPrometheusPort,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(&o)
|
||||||
|
}
|
||||||
|
|
||||||
|
envOverride(&o.host, prometheusHostEnvName)
|
||||||
|
envOverride(&o.port, prometheusPortEnvName)
|
||||||
|
|
||||||
|
if err := validate(&o); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
mux.Handle("GET /metrics", promhttp.Handler())
|
||||||
|
|
||||||
|
addr := net.JoinHostPort(o.host, o.port)
|
||||||
|
|
||||||
|
return &Server{
|
||||||
|
http: &http.Server{
|
||||||
|
Addr: addr,
|
||||||
|
Handler: mux,
|
||||||
|
// https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6
|
||||||
|
ReadHeaderTimeout: 5 * time.Second,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) ListenAndServe() {
|
||||||
|
s.http.ListenAndServe()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) Shutdown(ctx context.Context) error {
|
||||||
|
return s.http.Shutdown(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
type options struct {
|
||||||
|
host string
|
||||||
|
port string
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithHost(host string) ServerOption {
|
||||||
|
return func(o *options) {
|
||||||
|
o.host = host
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithPort(port string) ServerOption {
|
||||||
|
return func(o *options) {
|
||||||
|
o.port = port
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func validate(o *options) error {
|
||||||
|
port, err := strconv.ParseUint(o.port, 10, 16)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("prometheus port %q could not be parsed as a port number: %w",
|
||||||
|
o.port, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if port < minPrometheusPort || port > maxPrometheusPort {
|
||||||
|
return fmt.Errorf("prometheus port %d, should be between %d and %d",
|
||||||
|
port, minPrometheusPort, maxPrometheusPort)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func envOverride(target *string, envName string) {
|
||||||
|
val := os.Getenv(envName)
|
||||||
|
if val != "" {
|
||||||
|
*target = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,74 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package prometheus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewServerWithOptions(t *testing.T) {
|
||||||
|
s, err := NewServer(
|
||||||
|
WithHost("127.0.0.1"),
|
||||||
|
WithPort("57289"),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("NewServer() =", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
got := s.http.Addr
|
||||||
|
want := "127.0.0.1:57289"
|
||||||
|
|
||||||
|
if diff := cmp.Diff(want, got); diff != "" {
|
||||||
|
t.Error("unexpected diff (-want, +got) : ", diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewServerEnvOverride(t *testing.T) {
|
||||||
|
t.Setenv(prometheusHostEnvName, "0.0.0.0")
|
||||||
|
t.Setenv(prometheusPortEnvName, "1028")
|
||||||
|
|
||||||
|
s, err := NewServer(
|
||||||
|
WithHost("127.0.0.1"),
|
||||||
|
WithPort("57289"),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("NewServer() =", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
got := s.http.Addr
|
||||||
|
want := "0.0.0.0:1028"
|
||||||
|
|
||||||
|
if diff := cmp.Diff(want, got); diff != "" {
|
||||||
|
t.Error("unexpected diff (-want, +got) : ", diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewServerFailure(t *testing.T) {
|
||||||
|
if _, err := NewServer(WithPort("1000000")); err == nil {
|
||||||
|
t.Error("expected port parsing to fail")
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := NewServer(WithPort("80")); err == nil {
|
||||||
|
t.Error("expected below port range to fail")
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := NewServer(WithPort("65536")); err == nil {
|
||||||
|
t.Error("expected above port range to fail")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,29 @@
|
||||||
|
//go:build noprometheus
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
func buildPrometheus(ctx context.Context, cfg Config) (sdkmetric.Reader, shutdownFunc, error) {
|
||||||
|
panic("prometheus exporter is not supported")
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,61 @@
|
||||||
|
//go:build !noprometheus
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
|
||||||
|
otelprom "go.opentelemetry.io/otel/exporters/prometheus"
|
||||||
|
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"knative.dev/pkg/observability/metrics/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func buildPrometheus(_ context.Context, cfg Config) (sdkmetric.Reader, shutdownFunc, error) {
|
||||||
|
r, err := otelprom.New()
|
||||||
|
if err != nil {
|
||||||
|
return nil, noopFunc, fmt.Errorf("unable to create otel prometheus exporter: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var opts []prometheus.ServerOption
|
||||||
|
|
||||||
|
if cfg.Endpoint != "" {
|
||||||
|
host, port, err := net.SplitHostPort(cfg.Endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, noopFunc, fmt.Errorf(
|
||||||
|
`metrics-endpoint %q for prometheus exporter should be "host:port", "host%%zone:port", "[host]:port" or "[host%%zone]:port"`,
|
||||||
|
cfg.Endpoint,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = append(opts,
|
||||||
|
prometheus.WithHost(host),
|
||||||
|
prometheus.WithPort(port),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
server, err := prometheus.NewServer(opts...)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
server.ListenAndServe()
|
||||||
|
}()
|
||||||
|
|
||||||
|
return r, server.Shutdown, err
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,149 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||||
|
"go.opentelemetry.io/otel/metric"
|
||||||
|
"go.opentelemetry.io/otel/metric/noop"
|
||||||
|
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
)
|
||||||
|
|
||||||
|
type shutdownFunc func(ctx context.Context) error
|
||||||
|
|
||||||
|
func noopFunc(context.Context) error { return nil }
|
||||||
|
|
||||||
|
type MeterProvider struct {
|
||||||
|
metric.MeterProvider
|
||||||
|
shutdown []shutdownFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MeterProvider) Shutdown(ctx context.Context) error {
|
||||||
|
var errs []error
|
||||||
|
for _, shutdown := range m.shutdown {
|
||||||
|
if err := shutdown(ctx); err != nil {
|
||||||
|
errs = append(errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return errors.Join(errs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMeterProvider(
|
||||||
|
ctx context.Context,
|
||||||
|
cfg Config,
|
||||||
|
opts ...sdkmetric.Option,
|
||||||
|
) (*MeterProvider, error) {
|
||||||
|
if cfg.Protocol == ProtocolNone {
|
||||||
|
return &MeterProvider{MeterProvider: noop.NewMeterProvider()}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
reader, rShutdown, err := readerFor(ctx, cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error creating reader: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = append(opts, sdkmetric.WithReader(reader))
|
||||||
|
provider := sdkmetric.NewMeterProvider(opts...)
|
||||||
|
|
||||||
|
return &MeterProvider{
|
||||||
|
MeterProvider: provider,
|
||||||
|
shutdown: []shutdownFunc{provider.Shutdown, rShutdown},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readerFor(ctx context.Context, cfg Config) (sdkmetric.Reader, shutdownFunc, error) {
|
||||||
|
switch cfg.Protocol {
|
||||||
|
case ProtocolGRPC:
|
||||||
|
return buildGRPC(ctx, cfg)
|
||||||
|
case ProtocolHTTPProtobuf:
|
||||||
|
return buildHTTP(ctx, cfg)
|
||||||
|
case ProtocolPrometheus:
|
||||||
|
return buildPrometheus(ctx, cfg)
|
||||||
|
default:
|
||||||
|
return nil, noopFunc, fmt.Errorf("unsupported metric exporter: %q", cfg.Protocol)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildGRPC(ctx context.Context, cfg Config) (sdkmetric.Reader, shutdownFunc, error) {
|
||||||
|
var grpcOpts []otlpmetricgrpc.Option
|
||||||
|
|
||||||
|
if opt := endpointFor(cfg, otlpmetricgrpc.WithEndpoint); opt != nil {
|
||||||
|
grpcOpts = append(grpcOpts, opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
exporter, err := otlpmetricgrpc.New(ctx, grpcOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, noopFunc, fmt.Errorf("failed to build exporter: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var opts []sdkmetric.PeriodicReaderOption
|
||||||
|
|
||||||
|
if interval := intervalFor(cfg); interval != nil {
|
||||||
|
opts = append(opts, interval)
|
||||||
|
}
|
||||||
|
|
||||||
|
return sdkmetric.NewPeriodicReader(exporter, opts...), noopFunc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildHTTP(ctx context.Context, cfg Config) (sdkmetric.Reader, shutdownFunc, error) {
|
||||||
|
var httpOpts []otlpmetrichttp.Option
|
||||||
|
|
||||||
|
if opt := endpointFor(cfg, otlpmetrichttp.WithEndpoint); opt != nil {
|
||||||
|
httpOpts = append(httpOpts, opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
exporter, err := otlpmetrichttp.New(ctx, httpOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, noopFunc, fmt.Errorf("failed to build exporter: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var opts []sdkmetric.PeriodicReaderOption
|
||||||
|
if opt := intervalFor(cfg); opt != nil {
|
||||||
|
opts = append(opts, opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
return sdkmetric.NewPeriodicReader(exporter, opts...), noopFunc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func intervalFor(cfg Config) sdkmetric.PeriodicReaderOption {
|
||||||
|
var interval sdkmetric.PeriodicReaderOption
|
||||||
|
|
||||||
|
// Use the configuration's export interval if it's non-zero and there isn't
|
||||||
|
// an environment override
|
||||||
|
if os.Getenv("OTEL_METRIC_EXPORT_INTERVAL") == "" && cfg.ExportInterval != 0 {
|
||||||
|
interval = sdkmetric.WithInterval(cfg.ExportInterval)
|
||||||
|
}
|
||||||
|
|
||||||
|
return interval
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||||
|
func endpointFor[T any](cfg Config, opt func(string) T) T {
|
||||||
|
var epOption T
|
||||||
|
|
||||||
|
if (os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT") == "" &&
|
||||||
|
os.Getenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT") == "") && cfg.Endpoint != "" {
|
||||||
|
epOption = opt(cfg.Endpoint)
|
||||||
|
}
|
||||||
|
return epOption
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,160 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewMeterProviderProtocols(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
c Config
|
||||||
|
wantErr bool
|
||||||
|
}{{
|
||||||
|
name: "grpc",
|
||||||
|
c: Config{Protocol: ProtocolGRPC},
|
||||||
|
}, {
|
||||||
|
name: "none",
|
||||||
|
c: Config{Protocol: ProtocolNone},
|
||||||
|
}, {
|
||||||
|
name: "http",
|
||||||
|
c: Config{Protocol: ProtocolHTTPProtobuf},
|
||||||
|
}, {
|
||||||
|
name: "prometheus",
|
||||||
|
c: Config{Protocol: ProtocolPrometheus},
|
||||||
|
}, {
|
||||||
|
name: "bad protocol",
|
||||||
|
c: Config{Protocol: "bad"},
|
||||||
|
wantErr: true,
|
||||||
|
}}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
_, err := NewMeterProvider(ctx, tc.c)
|
||||||
|
|
||||||
|
if !tc.wantErr && err != nil {
|
||||||
|
t.Error("unexpected failed", err)
|
||||||
|
} else if tc.wantErr && err == nil {
|
||||||
|
t.Error("expected failure")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPrometheusEndpoint(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
c Config
|
||||||
|
wantErr bool
|
||||||
|
}{{
|
||||||
|
name: "ipv4",
|
||||||
|
c: Config{
|
||||||
|
Protocol: ProtocolPrometheus,
|
||||||
|
Endpoint: "0.0.0.0:9000", // IPv4 only on port 9000
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
name: "ipv6",
|
||||||
|
c: Config{
|
||||||
|
Protocol: ProtocolPrometheus,
|
||||||
|
Endpoint: "[::]:9000", // IPv6 only on port 9000
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
name: "bad endpoint",
|
||||||
|
c: Config{
|
||||||
|
Protocol: ProtocolPrometheus,
|
||||||
|
Endpoint: "[:::9000", // IPv6 only on port 9000
|
||||||
|
},
|
||||||
|
wantErr: true,
|
||||||
|
}}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
_, err := NewMeterProvider(ctx, tc.c)
|
||||||
|
|
||||||
|
if !tc.wantErr && err != nil {
|
||||||
|
t.Error("unexpected failed", err)
|
||||||
|
} else if tc.wantErr && err == nil {
|
||||||
|
t.Error("expected failure")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEndpointFor(t *testing.T) {
|
||||||
|
cases := []string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT",
|
||||||
|
}
|
||||||
|
|
||||||
|
optFunc := func(string) *string {
|
||||||
|
panic("unexpected call")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, envKey := range cases {
|
||||||
|
t.Run("override "+envKey, func(t *testing.T) {
|
||||||
|
t.Setenv(envKey, "https://override.example.com")
|
||||||
|
|
||||||
|
opt := endpointFor(Config{Endpoint: "https://otel.example.com"}, optFunc)
|
||||||
|
|
||||||
|
if opt != nil {
|
||||||
|
t.Error("expected the option to not be present when 'OTEL_EXPORTER_OTLP_ENDPOINT' is set")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("normal", func(t *testing.T) {
|
||||||
|
optFunc := func(string) *string {
|
||||||
|
result := "result"
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
opt := endpointFor(Config{Endpoint: "https://otel.example.com"}, optFunc)
|
||||||
|
if *opt != "result" {
|
||||||
|
t.Error("expected option to work when no env vars are present")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIntervalFor(t *testing.T) {
|
||||||
|
cases := []string{
|
||||||
|
"OTEL_METRIC_EXPORT_INTERVAL",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, envKey := range cases {
|
||||||
|
t.Run("override "+envKey, func(t *testing.T) {
|
||||||
|
t.Setenv(envKey, "https://override.example.com")
|
||||||
|
|
||||||
|
opt := intervalFor(Config{ExportInterval: time.Second})
|
||||||
|
|
||||||
|
if opt != nil {
|
||||||
|
t.Error("expected the option to not be present when 'OTEL_EXPORTER_OTLP_ENDPOINT' is set")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("normal", func(t *testing.T) {
|
||||||
|
opt := intervalFor(Config{ExportInterval: time.Second})
|
||||||
|
|
||||||
|
if opt == nil {
|
||||||
|
t.Error("expected the interval option be present when env vars are not set")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,74 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
configmap "knative.dev/pkg/configmap/parser"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ProfilingEnabled = "enabled"
|
||||||
|
ProfilingDisabled = "disabled"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
Profiling string
|
||||||
|
ExportInterval time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Config) Validate() error {
|
||||||
|
switch c.Profiling {
|
||||||
|
case ProfilingEnabled, ProfilingDisabled:
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported profile setting %q", c.Profiling)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExportInterval == 0 => OTel will use a default value
|
||||||
|
if c.ExportInterval < 0 {
|
||||||
|
return fmt.Errorf("export interval %q should be greater than zero", c.ExportInterval)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Config) ProfilingEnabled() bool {
|
||||||
|
return c.Profiling == ProfilingEnabled
|
||||||
|
}
|
||||||
|
|
||||||
|
func DefaultConfig() Config {
|
||||||
|
return Config{
|
||||||
|
Profiling: ProfilingDisabled,
|
||||||
|
// same as OTel runtime.DefaultMinimumReadMemStatsInterval
|
||||||
|
ExportInterval: 15 * time.Second,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFromMap(m map[string]string) (Config, error) {
|
||||||
|
c := DefaultConfig()
|
||||||
|
|
||||||
|
err := configmap.Parse(m,
|
||||||
|
configmap.As("runtime-profiling", &c.Profiling),
|
||||||
|
configmap.As("runtime-export-interval", &c.ExportInterval),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return c, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return c, c.Validate()
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,67 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewFromMap(t *testing.T) {
|
||||||
|
got, err := NewFromMap(nil)
|
||||||
|
want := DefaultConfig()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Error("unexpected error:", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if diff := cmp.Diff(want, got); diff != "" {
|
||||||
|
t.Error("unexpected diff (-want +got): ", diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProfilingEnabled(t *testing.T) {
|
||||||
|
c := Config{Profiling: ProfilingEnabled}
|
||||||
|
if !c.ProfilingEnabled() {
|
||||||
|
t.Error("profiling should be enabled")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewFromMapBadInput(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
m map[string]string
|
||||||
|
}{{
|
||||||
|
name: "unsupported profiling setting",
|
||||||
|
m: map[string]string{"runtime-profiling": "bad-setting"},
|
||||||
|
}, {
|
||||||
|
name: "bad export interval - negative number",
|
||||||
|
m: map[string]string{"runtime-export-interval": "-1s"},
|
||||||
|
}, {
|
||||||
|
name: "bad export interval - not an integer",
|
||||||
|
m: map[string]string{"runtime-export-interval": "bad-interval"},
|
||||||
|
}}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
if _, err := NewFromMap(tc.m); err == nil {
|
||||||
|
t.Error("expected an error")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,19 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package runtime provides configuration to publish Go runtime
|
||||||
|
// statistics and HTTP helpers to expose Go runtime profiling data
|
||||||
|
package runtime
|
||||||
|
|
@ -0,0 +1,97 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/pprof"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ProfilingPortEnvKey specified the name of an environment variable that
|
||||||
|
// may be used to override the default profiling port.
|
||||||
|
ProfilingPortEnvKey = "PROFILING_PORT"
|
||||||
|
|
||||||
|
// ProfilingDefaultPort specifies the default port where profiling data is available when profiling is enabled
|
||||||
|
ProfilingDefaultPort = 8008
|
||||||
|
)
|
||||||
|
|
||||||
|
type ProfilingServer struct {
|
||||||
|
*http.Server
|
||||||
|
*ProfilingHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
type ProfilingHandler struct {
|
||||||
|
enabled *atomic.Bool
|
||||||
|
handler http.Handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHandler create a new ProfilingHandler which serves runtime profiling data
|
||||||
|
// according to the given context path
|
||||||
|
func NewProfilingHandler() *ProfilingHandler {
|
||||||
|
const pprofPrefix = "GET /debug/pprof/"
|
||||||
|
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
mux.HandleFunc(pprofPrefix, pprof.Index)
|
||||||
|
mux.HandleFunc(pprofPrefix+"cmdline", pprof.Cmdline)
|
||||||
|
mux.HandleFunc(pprofPrefix+"profile", pprof.Profile)
|
||||||
|
mux.HandleFunc(pprofPrefix+"symbol", pprof.Symbol)
|
||||||
|
mux.HandleFunc(pprofPrefix+"trace", pprof.Trace)
|
||||||
|
|
||||||
|
enabled := &atomic.Bool{}
|
||||||
|
enabled.Store(false)
|
||||||
|
|
||||||
|
return &ProfilingHandler{
|
||||||
|
handler: mux,
|
||||||
|
enabled: enabled,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ProfilingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if h.enabled.Load() {
|
||||||
|
h.handler.ServeHTTP(w, r)
|
||||||
|
} else {
|
||||||
|
http.NotFound(w, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *ProfilingHandler) SetEnabled(enabled bool) (old bool) {
|
||||||
|
return h.enabled.Swap(enabled)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServer creates a new http server that exposes profiling data on the default profiling port
|
||||||
|
func NewProfilingServer() *ProfilingServer {
|
||||||
|
port := os.Getenv(ProfilingPortEnvKey)
|
||||||
|
if port == "" {
|
||||||
|
port = strconv.Itoa(ProfilingDefaultPort)
|
||||||
|
}
|
||||||
|
|
||||||
|
h := NewProfilingHandler()
|
||||||
|
return &ProfilingServer{
|
||||||
|
ProfilingHandler: h,
|
||||||
|
Server: &http.Server{
|
||||||
|
Addr: ":" + port,
|
||||||
|
Handler: h,
|
||||||
|
// https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6
|
||||||
|
ReadHeaderTimeout: 15 * time.Second,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,60 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewProfilingHandler(t *testing.T) {
|
||||||
|
h := NewProfilingHandler()
|
||||||
|
|
||||||
|
w := httptest.NewRecorder()
|
||||||
|
r, _ := http.NewRequest(http.MethodGet, "/debug/pprof/", nil)
|
||||||
|
h.ServeHTTP(w, r)
|
||||||
|
|
||||||
|
resp := w.Result()
|
||||||
|
if got, want := resp.StatusCode, http.StatusNotFound; got != want {
|
||||||
|
t.Errorf("unexpected status code %d want: %d", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
w = httptest.NewRecorder()
|
||||||
|
h.SetEnabled(true)
|
||||||
|
h.ServeHTTP(w, r)
|
||||||
|
|
||||||
|
resp = w.Result()
|
||||||
|
if got, want := resp.StatusCode, http.StatusOK; got != want {
|
||||||
|
t.Errorf("unexpected status code %d want: %d", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewProfilingServerPort(t *testing.T) {
|
||||||
|
s := NewProfilingServer()
|
||||||
|
|
||||||
|
if got, want := s.Server.Addr, ":8008"; got != want {
|
||||||
|
t.Errorf("uexpected server port %q, want: %q", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Setenv(ProfilingPortEnvKey, "8181")
|
||||||
|
s = NewProfilingServer()
|
||||||
|
|
||||||
|
if got, want := s.Server.Addr, ":8181"; got != want {
|
||||||
|
t.Errorf("uexpected server port %q, want: %q", got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,83 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package tracing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
configmap "knative.dev/pkg/configmap/parser"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ProtocolGRPC = "grpc"
|
||||||
|
ProtocolHTTPProtobuf = "http/protobuf"
|
||||||
|
ProtocolNone = "none"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
Protocol string
|
||||||
|
Endpoint string
|
||||||
|
SamplingRate float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Config) Validate() error {
|
||||||
|
switch c.Protocol {
|
||||||
|
case ProtocolGRPC, ProtocolHTTPProtobuf:
|
||||||
|
if c.Endpoint == "" {
|
||||||
|
return fmt.Errorf("endpoint should be set for protocol %q", c.Protocol)
|
||||||
|
}
|
||||||
|
case ProtocolNone:
|
||||||
|
if c.Endpoint != "" {
|
||||||
|
return errors.New("endpoint should not be set when protocol is 'none'")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported protocol %q", c.Protocol)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.SamplingRate < 0 {
|
||||||
|
return fmt.Errorf("sampling rate %f should be greater or equal to zero", c.SamplingRate)
|
||||||
|
} else if c.SamplingRate > 1.0 {
|
||||||
|
return fmt.Errorf("sampling rate %f should be less than or equal to one", c.SamplingRate)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func DefaultConfig() Config {
|
||||||
|
return Config{
|
||||||
|
Protocol: ProtocolNone,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFromMap(m map[string]string) (Config, error) {
|
||||||
|
return NewFromMapWithPrefix("", m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFromMapWithPrefix(prefix string, m map[string]string) (Config, error) {
|
||||||
|
c := DefaultConfig()
|
||||||
|
|
||||||
|
err := configmap.Parse(m,
|
||||||
|
configmap.As(prefix+"tracing-protocol", &c.Protocol),
|
||||||
|
configmap.As(prefix+"tracing-endpoint", &c.Endpoint),
|
||||||
|
configmap.As(prefix+"tracing-sampling-rate", &c.SamplingRate),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return c, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return c, c.Validate()
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,90 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package tracing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewFromMap(t *testing.T) {
|
||||||
|
got, err := NewFromMap(nil)
|
||||||
|
want := DefaultConfig()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Error("unexpected error:", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if diff := cmp.Diff(want, got); diff != "" {
|
||||||
|
t.Error("unexpected diff (-want +got): ", diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewFromMapBadInput(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
m map[string]string
|
||||||
|
}{{
|
||||||
|
name: "unexpected endpoint set",
|
||||||
|
m: map[string]string{"tracing-endpoint": "https://blah.example.com"},
|
||||||
|
}, {
|
||||||
|
name: "missing endpoint",
|
||||||
|
m: map[string]string{"tracing-protocol": "grpc"},
|
||||||
|
}, {
|
||||||
|
name: "unsupported protocol",
|
||||||
|
m: map[string]string{"tracing-protocol": "bad-protocol"},
|
||||||
|
}, {
|
||||||
|
name: "bad sampling rate - less than zero",
|
||||||
|
m: map[string]string{"tracing-sampling-rate": "-1"},
|
||||||
|
}, {
|
||||||
|
name: "bad sampling rate - greater than one",
|
||||||
|
m: map[string]string{"tracing-sampling-rate": "1.1"},
|
||||||
|
}, {
|
||||||
|
name: "bad sampling rate - not a float",
|
||||||
|
m: map[string]string{"tracing-sampling-rate": "bad-rate"},
|
||||||
|
}}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
if _, err := NewFromMap(tc.m); err == nil {
|
||||||
|
t.Error("expected an error")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewFromMapWithPrefix(t *testing.T) {
|
||||||
|
got, err := NewFromMapWithPrefix("request-", map[string]string{
|
||||||
|
"request-tracing-protocol": ProtocolGRPC,
|
||||||
|
"request-tracing-endpoint": "https://blah.example.com",
|
||||||
|
"request-tracing-sampling-rate": "0.8",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Error("unexpected error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
want := Config{
|
||||||
|
Protocol: ProtocolGRPC,
|
||||||
|
Endpoint: "https://blah.example.com",
|
||||||
|
SamplingRate: 0.8,
|
||||||
|
}
|
||||||
|
|
||||||
|
if diff := cmp.Diff(want, got); diff != "" {
|
||||||
|
t.Error("unexpected diff (-want +got): ", diff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,162 @@
|
||||||
|
/*
|
||||||
|
Copyright 2025 The Knative Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package tracing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
|
||||||
|
"go.opentelemetry.io/otel/propagation"
|
||||||
|
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
"go.opentelemetry.io/otel/trace"
|
||||||
|
"go.opentelemetry.io/otel/trace/noop"
|
||||||
|
)
|
||||||
|
|
||||||
|
func noopFunc(context.Context) error { return nil }
|
||||||
|
|
||||||
|
type TracerProvider struct {
|
||||||
|
trace.TracerProvider
|
||||||
|
shutdown func(context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *TracerProvider) Shutdown(ctx context.Context) error {
|
||||||
|
return m.shutdown(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func DefaultTextMapPropagator() propagation.TextMapPropagator {
|
||||||
|
return propagation.NewCompositeTextMapPropagator(
|
||||||
|
propagation.TraceContext{},
|
||||||
|
propagation.Baggage{},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewTracerProvider(
|
||||||
|
ctx context.Context,
|
||||||
|
cfg Config,
|
||||||
|
opts ...sdktrace.TracerProviderOption,
|
||||||
|
) (*TracerProvider, error) {
|
||||||
|
if cfg.Protocol == ProtocolNone {
|
||||||
|
return &TracerProvider{
|
||||||
|
TracerProvider: noop.NewTracerProvider(),
|
||||||
|
shutdown: noopFunc,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
exp, err := exporterFor(ctx, cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error creating tracer exporter: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sampler, err := sampleFor(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error creating tracer sampler: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
provider := sdktrace.NewTracerProvider(
|
||||||
|
sdktrace.WithBatcher(exp),
|
||||||
|
sdktrace.WithSampler(sampler),
|
||||||
|
)
|
||||||
|
return &TracerProvider{
|
||||||
|
TracerProvider: provider,
|
||||||
|
shutdown: provider.Shutdown,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func exporterFor(ctx context.Context, cfg Config) (sdktrace.SpanExporter, error) {
|
||||||
|
switch cfg.Protocol {
|
||||||
|
case ProtocolGRPC:
|
||||||
|
return buildGRPC(ctx, cfg)
|
||||||
|
case ProtocolHTTPProtobuf:
|
||||||
|
return buildHTTP(ctx, cfg)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unsupported metric exporter: %q", cfg.Protocol)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildGRPC(ctx context.Context, cfg Config) (sdktrace.SpanExporter, error) {
|
||||||
|
var grpcOpts []otlptracegrpc.Option
|
||||||
|
|
||||||
|
if opt := endpointFor(cfg, otlptracegrpc.WithEndpoint); opt != nil {
|
||||||
|
grpcOpts = append(grpcOpts, opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
exporter, err := otlptracegrpc.New(ctx, grpcOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to build exporter: %w", err)
|
||||||
|
}
|
||||||
|
return exporter, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildHTTP(ctx context.Context, cfg Config) (sdktrace.SpanExporter, error) {
|
||||||
|
var httpOpts []otlptracehttp.Option
|
||||||
|
|
||||||
|
if opt := endpointFor(cfg, otlptracehttp.WithEndpoint); opt != nil {
|
||||||
|
httpOpts = append(httpOpts, opt)
|
||||||
|
}
|
||||||
|
|
||||||
|
exporter, err := otlptracehttp.New(ctx, httpOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to build exporter: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return exporter, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT is
|
||||||
|
// set then we will prefer that over what's in the Config
|
||||||
|
func endpointFor[T any](cfg Config, opt func(string) T) T {
|
||||||
|
var epOption T
|
||||||
|
|
||||||
|
if (os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT") == "" &&
|
||||||
|
os.Getenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT") == "") && cfg.Endpoint != "" {
|
||||||
|
epOption = opt(cfg.Endpoint)
|
||||||
|
}
|
||||||
|
return epOption
|
||||||
|
}
|
||||||
|
|
||||||
|
func sampleFor(cfg Config) (sdktrace.Sampler, error) {
|
||||||
|
// Don't override env arg
|
||||||
|
if os.Getenv("OTEL_TRACES_SAMPLER") != "" {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rate := cfg.SamplingRate
|
||||||
|
|
||||||
|
if val := os.Getenv("OTEL_TRACES_SAMPLER_ARG"); val != "" {
|
||||||
|
override, err := strconv.ParseFloat(val, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to parse sample rate override: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rate = override
|
||||||
|
}
|
||||||
|
|
||||||
|
if rate >= 1.0 {
|
||||||
|
return sdktrace.AlwaysSample(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.SamplingRate <= 0.0 {
|
||||||
|
return sdktrace.NeverSample(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
root := sdktrace.TraceIDRatioBased(cfg.SamplingRate)
|
||||||
|
return sdktrace.ParentBased(root), nil
|
||||||
|
}
|
||||||
|
|
@@ -0,0 +1,169 @@
/*
Copyright 2025 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package tracing

import (
	"context"
	"errors"
	"testing"

	"github.com/google/go-cmp/cmp"
)

func TestNewTrackerProviderProtocols(t *testing.T) {
	cases := []struct {
		name    string
		c       Config
		wantErr bool
	}{{
		name: "grpc",
		c:    Config{Protocol: ProtocolGRPC},
	}, {
		name: "none",
		c:    Config{Protocol: ProtocolNone},
	}, {
		name: "http",
		c:    Config{Protocol: ProtocolHTTPProtobuf},
	}, {
		name:    "bad protocol",
		c:       Config{Protocol: "bad"},
		wantErr: true,
	}}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()
			_, err := NewTracerProvider(ctx, tc.c)

			if !tc.wantErr && err != nil {
				t.Error("unexpected failure", err)
			} else if tc.wantErr && err == nil {
				t.Error("expected failure")
			}
		})
	}
}

func TestEndpointFor(t *testing.T) {
	cases := []string{
		"OTEL_EXPORTER_OTLP_ENDPOINT",
		"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT",
	}

	optFunc := func(string) *string {
		panic("unexpected call")
	}

	for _, envKey := range cases {
		t.Run("override "+envKey, func(t *testing.T) {
			t.Setenv(envKey, "https://override.example.com")

			opt := endpointFor(Config{Endpoint: "https://otel.example.com"}, optFunc)

			if opt != nil {
				t.Error("expected the option to not be present when 'OTEL_EXPORTER_OTLP_ENDPOINT' is set")
			}
		})
	}

	t.Run("normal", func(t *testing.T) {
		optFunc := func(string) *string {
			result := "result"
			return &result
		}
		opt := endpointFor(Config{Endpoint: "https://otel.example.com"}, optFunc)
		if *opt != "result" {
			t.Error("expected option to work when no env vars are present")
		}
	})
}

func TestTracerProviderShutdown(t *testing.T) {
	want := errors.New("some error")
	invoked := false
	shutdown := func(context.Context) error {
		invoked = true
		return want
	}

	p := TracerProvider{shutdown: shutdown}
	got := p.Shutdown(context.Background())

	if !invoked {
		t.Fatal("expected shutdown to be invoked")
	}

	if !errors.Is(got, want) {
		t.Error("unexpected error (-want +got): ", cmp.Diff(want, got))
	}
}

func TestSampleFor(t *testing.T) {
	cfg := Config{
		SamplingRate: 0.85,
	}

	cases := []struct {
		name           string
		env            map[string]string
		wantNilSampler bool
		wantErr        bool
	}{{
		name: "sampler override",
		env: map[string]string{
			"OTEL_TRACES_SAMPLER": "some-sampler",
		},
		wantNilSampler: true,
	}, {
		name:           "standard sampler",
		wantNilSampler: false,
	}, {
		name:           "sample arg override",
		wantNilSampler: false,
		env: map[string]string{
			"OTEL_TRACES_SAMPLER_ARG": "1.0",
		},
	}, {
		name:           "sample arg override - bad input",
		wantNilSampler: true,
		wantErr:        true,
		env: map[string]string{
			"OTEL_TRACES_SAMPLER_ARG": "bad-ratio",
		},
	}}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			for k, v := range tc.env {
				t.Setenv(k, v)
			}

			got, err := sampleFor(cfg)
			if tc.wantNilSampler && got != nil {
				t.Error("expected a nil sampler")
			} else if !tc.wantNilSampler && got == nil {
				t.Error("expected a non-nil sampler")
			}

			if tc.wantErr && err == nil {
				t.Error("expected an error")
			} else if !tc.wantErr && err != nil {
				t.Error("unexpected error")
			}
		})
	}
}
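TestSampleFor only checks whether a sampler was returned. The boundary behaviour of sampleFor can also be made visible by logging each sampler's description, as in the hypothetical sketch below (not part of this change): a rate of 0 maps to a never-sample policy, 1.0 or above to always-sample, and anything in between to a parent-based trace-ID ratio. Exact description strings come from the OpenTelemetry SDK and may differ slightly between versions.

package tracing

import "testing"

// Hypothetical test illustrating which sampler each configured rate maps to.
func TestSampleForBoundariesSketch(t *testing.T) {
	for _, rate := range []float64{0.0, 0.25, 1.0} {
		s, err := sampleFor(Config{SamplingRate: rate})
		if err != nil {
			t.Fatal(err)
		}
		if s == nil {
			t.Skip("OTEL_TRACES_SAMPLER is set; sampleFor defers to the environment")
		}
		// Expected (roughly): 0.0 -> always-off, 0.25 -> ParentBased(TraceIDRatioBased{0.25}),
		// 1.0 -> always-on.
		t.Logf("rate %.2f => %s", rate, s.Description())
	}
}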
@@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

# IDEs
.idea/
@@ -0,0 +1,29 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [5.0.0] - 2024-12-19

### Added

- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.

### Changed

- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
- Retry function now accepts a context.Context.
- Operation function signature changed to return result (any type) and error.

### Removed

- RetryNotify* and RetryWithData functions. Only single Retry function remains.
- Optional arguments from ExponentialBackoff constructor.
- Clock and Timer interfaces.

### Fixed

- The original error is returned from Retry if there's a PermanentError. (#144)
- The Retry function respects the wrapped PermanentError. (#140)
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 Cenk Altı

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,31 @@
# Exponential Backoff [![GoDoc][godoc image]][godoc]

This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].

[Exponential backoff][exponential backoff wiki]
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
in order to gradually find an acceptable rate.
The retries exponentially increase and stop increasing when a certain threshold is met.

## Usage

Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.

For most cases, use `Retry` function. See [example_test.go][example] for an example.

If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.

## Contributing

* I would like to keep this library as small as possible.
* Please don't send a PR without opening an issue and discussing it first.
* If proposed change is not a common use case, I will probably not accept it.

[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png

[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff

[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go
@@ -0,0 +1,66 @@
// Package backoff implements backoff algorithms for retrying operations.
//
// Use Retry function for retrying operations that may fail.
// If Retry does not meet your needs,
// copy/paste the function into your project and modify as you wish.
//
// There is also Ticker type similar to time.Ticker.
// You can use it if you need to work with channels.
//
// See Examples section below for usage examples.
package backoff

import "time"

// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
	// NextBackOff returns the duration to wait before retrying the operation,
	// backoff.Stop to indicate that no more retries should be made.
	//
	// Example usage:
	//
	// 	duration := backoff.NextBackOff()
	// 	if duration == backoff.Stop {
	// 		// Do not retry operation.
	// 	} else {
	// 		// Sleep for duration and retry operation.
	// 	}
	//
	NextBackOff() time.Duration

	// Reset to initial state.
	Reset()
}

// Stop indicates that no more retries should be made for use in NextBackOff().
const Stop time.Duration = -1

// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting, indefinitely.
type ZeroBackOff struct{}

func (b *ZeroBackOff) Reset() {}

func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }

// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
// NextBackOff(), meaning that the operation should never be retried.
type StopBackOff struct{}

func (b *StopBackOff) Reset() {}

func (b *StopBackOff) NextBackOff() time.Duration { return Stop }

// ConstantBackOff is a backoff policy that always returns the same backoff delay.
// This is in contrast to an exponential backoff policy,
// which returns a delay that grows longer as you call NextBackOff() over and over again.
type ConstantBackOff struct {
	Interval time.Duration
}

func (b *ConstantBackOff) Reset()                     {}
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }

func NewConstantBackOff(d time.Duration) *ConstantBackOff {
	return &ConstantBackOff{Interval: d}
}
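The NextBackOff/Stop protocol documented on the interface above is enough to drive a hand-rolled retry loop with any policy. A minimal sketch (not part of the vendored library; the fetch function and the five-attempt cap are stand-ins for this example) using the constant policy:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

// fetch is a stand-in operation that always fails, used only for this sketch.
func fetch() error { return errors.New("temporarily unavailable") }

func main() {
	// Any BackOff policy can drive the loop via NextBackOff/Stop;
	// here a constant 200ms delay is used, capped at five attempts.
	var b backoff.BackOff = backoff.NewConstantBackOff(200 * time.Millisecond)
	b.Reset()

	for attempt := 1; attempt <= 5; attempt++ {
		err := fetch()
		if err == nil {
			return
		}
		d := b.NextBackOff()
		if d == backoff.Stop {
			fmt.Println("giving up:", err)
			return
		}
		time.Sleep(d)
	}
}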
@@ -0,0 +1,46 @@
package backoff

import (
	"fmt"
	"time"
)

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
	Err error
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
	if err == nil {
		return nil
	}
	return &PermanentError{
		Err: err,
	}
}

// Error returns a string representation of the Permanent error.
func (e *PermanentError) Error() string {
	return e.Err.Error()
}

// Unwrap returns the wrapped error.
func (e *PermanentError) Unwrap() error {
	return e.Err
}

// RetryAfterError signals that the operation should be retried after the given duration.
type RetryAfterError struct {
	Duration time.Duration
}

// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying.
func RetryAfter(seconds int) error {
	return &RetryAfterError{Duration: time.Duration(seconds) * time.Second}
}

// Error returns a string representation of the RetryAfter error.
func (e *RetryAfterError) Error() string {
	return fmt.Sprintf("retry after %s", e.Duration)
}
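These two error types are how an operation steers the retry loop: Permanent stops it, RetryAfter overrides the next delay. A short sketch of that classification (the URL and status handling are placeholders for this example, not part of the vendored code):

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	// The operation classifies failures for Retry: 429 respects the server's
	// pacing via RetryAfter, other 4xx responses are wrapped with Permanent so
	// they are not retried, and everything else is retried on the normal schedule.
	op := func() (*http.Response, error) {
		resp, err := http.Get("https://example.com/api") // placeholder URL
		if err != nil {
			return nil, err // transient: retry
		}
		switch {
		case resp.StatusCode == http.StatusTooManyRequests:
			resp.Body.Close()
			return nil, backoff.RetryAfter(5) // wait ~5s before the next try
		case resp.StatusCode >= 400 && resp.StatusCode < 500:
			resp.Body.Close()
			return nil, backoff.Permanent(errors.New(resp.Status)) // do not retry
		}
		return resp, nil
	}

	resp, err := backoff.Retry(context.Background(), op)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}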
@@ -0,0 +1,125 @@
package backoff

import (
	"math/rand"
	"time"
)

/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.

NextBackOff() is calculated using the following formula:

	randomized interval =
		RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])

In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.

For example, given the following parameters:

	RetryInterval = 2
	RandomizationFactor = 0.5
	Multiplier = 2

the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.

Note: MaxInterval caps the RetryInterval and not the randomized interval.

If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.

The elapsed time can be reset by calling Reset().

Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:

	Request #  RetryInterval (seconds)  Randomized Interval (seconds)
	 1          0.5                     [0.25,   0.75]
	 2          0.75                    [0.375,  1.125]
	 3          1.125                   [0.562,  1.687]
	 4          1.687                   [0.8435, 2.53]
	 5          2.53                    [1.265,  3.795]
	 6          3.795                   [1.897,  5.692]
	 7          5.692                   [2.846,  8.538]
	 8          8.538                   [4.269, 12.807]
	 9         12.807                   [6.403, 19.210]
	10         19.210                   backoff.Stop

Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
	InitialInterval     time.Duration
	RandomizationFactor float64
	Multiplier          float64
	MaxInterval         time.Duration

	currentInterval time.Duration
}

// Default values for ExponentialBackOff.
const (
	DefaultInitialInterval     = 500 * time.Millisecond
	DefaultRandomizationFactor = 0.5
	DefaultMultiplier          = 1.5
	DefaultMaxInterval         = 60 * time.Second
)

// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
	return &ExponentialBackOff{
		InitialInterval:     DefaultInitialInterval,
		RandomizationFactor: DefaultRandomizationFactor,
		Multiplier:          DefaultMultiplier,
		MaxInterval:         DefaultMaxInterval,
	}
}

// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
	b.currentInterval = b.InitialInterval
}

// NextBackOff calculates the next backoff interval using the formula:
//
//	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
	if b.currentInterval == 0 {
		b.currentInterval = b.InitialInterval
	}

	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
	b.incrementCurrentInterval()
	return next
}

// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
	// Check for overflow, if overflow is detected set the current interval to the max interval.
	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
		b.currentInterval = b.MaxInterval
	} else {
		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
	}
}

// Returns a random value from the following interval:
//
//	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
	if randomizationFactor == 0 {
		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
	}
	var delta = randomizationFactor * float64(currentInterval)
	var minInterval = float64(currentInterval) - delta
	var maxInterval = float64(currentInterval) + delta

	// Get a random value from the range [minInterval, maxInterval].
	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
	// we want a 33% chance for selecting either 1, 2 or 3.
	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
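A quick way to see the schedule from the table above is to print a handful of intervals with the defaults: each value is roughly 1.5x the previous one, jittered by plus or minus 50%, and the underlying interval is capped at 60s. A minimal sketch (not part of the vendored library; output varies run to run because of the randomization factor):

package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	// Default policy: InitialInterval 500ms, Multiplier 1.5,
	// RandomizationFactor 0.5, MaxInterval 60s.
	b := backoff.NewExponentialBackOff()
	b.Reset()

	for i := 1; i <= 5; i++ {
		fmt.Printf("try %d: wait %v\n", i, b.NextBackOff())
	}
}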
@@ -0,0 +1,139 @@
package backoff

import (
	"context"
	"errors"
	"time"
)

// DefaultMaxElapsedTime sets a default limit for the total retry duration.
const DefaultMaxElapsedTime = 15 * time.Minute

// Operation is a function that attempts an operation and may be retried.
type Operation[T any] func() (T, error)

// Notify is a function called on operation error with the error and backoff duration.
type Notify func(error, time.Duration)

// retryOptions holds configuration settings for the retry mechanism.
type retryOptions struct {
	BackOff        BackOff       // Strategy for calculating backoff periods.
	Timer          timer         // Timer to manage retry delays.
	Notify         Notify        // Optional function to notify on each retry error.
	MaxTries       uint          // Maximum number of retry attempts.
	MaxElapsedTime time.Duration // Maximum total time for all retries.
}

type RetryOption func(*retryOptions)

// WithBackOff configures a custom backoff strategy.
func WithBackOff(b BackOff) RetryOption {
	return func(args *retryOptions) {
		args.BackOff = b
	}
}

// withTimer sets a custom timer for managing delays between retries.
func withTimer(t timer) RetryOption {
	return func(args *retryOptions) {
		args.Timer = t
	}
}

// WithNotify sets a notification function to handle retry errors.
func WithNotify(n Notify) RetryOption {
	return func(args *retryOptions) {
		args.Notify = n
	}
}

// WithMaxTries limits the number of retry attempts.
func WithMaxTries(n uint) RetryOption {
	return func(args *retryOptions) {
		args.MaxTries = n
	}
}

// WithMaxElapsedTime limits the total duration for retry attempts.
func WithMaxElapsedTime(d time.Duration) RetryOption {
	return func(args *retryOptions) {
		args.MaxElapsedTime = d
	}
}

// Retry attempts the operation until success, a permanent error, or backoff completion.
// It ensures the operation is executed at least once.
//
// Returns the operation result or error if retries are exhausted or context is cancelled.
func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) {
	// Initialize default retry options.
	args := &retryOptions{
		BackOff:        NewExponentialBackOff(),
		Timer:          &defaultTimer{},
		MaxElapsedTime: DefaultMaxElapsedTime,
	}

	// Apply user-provided options to the default settings.
	for _, opt := range opts {
		opt(args)
	}

	defer args.Timer.Stop()

	startedAt := time.Now()
	args.BackOff.Reset()
	for numTries := uint(1); ; numTries++ {
		// Execute the operation.
		res, err := operation()
		if err == nil {
			return res, nil
		}

		// Stop retrying if maximum tries exceeded.
		if args.MaxTries > 0 && numTries >= args.MaxTries {
			return res, err
		}

		// Handle permanent errors without retrying.
		var permanent *PermanentError
		if errors.As(err, &permanent) {
			return res, err
		}

		// Stop retrying if context is cancelled.
		if cerr := context.Cause(ctx); cerr != nil {
			return res, cerr
		}

		// Calculate next backoff duration.
		next := args.BackOff.NextBackOff()
		if next == Stop {
			return res, err
		}

		// Reset backoff if RetryAfterError is encountered.
		var retryAfter *RetryAfterError
		if errors.As(err, &retryAfter) {
			next = retryAfter.Duration
			args.BackOff.Reset()
		}

		// Stop retrying if maximum elapsed time exceeded.
		if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime {
			return res, err
		}

		// Notify on error if a notifier function is provided.
		if args.Notify != nil {
			args.Notify(err, next)
		}

		// Wait for the next backoff period or context cancellation.
		args.Timer.Start(next)
		select {
		case <-args.Timer.C():
		case <-ctx.Done():
			return res, context.Cause(ctx)
		}
	}
}
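The options above compose into a single Retry call. A small sketch of the v5 entry point with a bounded schedule (not part of the vendored library; the always-failing operation is a stand-in so the bounds are what actually end the loop):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	attempts := 0

	// Operation[string]: returns a result and an error; here it always fails.
	op := func() (string, error) {
		attempts++
		return "", fmt.Errorf("attempt %d failed", attempts)
	}

	// Notify is called before each wait with the error and the chosen delay.
	notify := func(err error, next time.Duration) {
		fmt.Printf("%v, retrying in %v\n", err, next)
	}

	_, err := backoff.Retry(
		context.Background(),
		op,
		backoff.WithBackOff(backoff.NewExponentialBackOff()),
		backoff.WithMaxTries(4),
		backoff.WithMaxElapsedTime(30*time.Second),
		backoff.WithNotify(notify),
	)
	fmt.Println("final error:", err)
}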
@@ -0,0 +1,83 @@
package backoff

import (
	"sync"
	"time"
)

// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
	C        <-chan time.Time
	c        chan time.Time
	b        BackOff
	timer    timer
	stop     chan struct{}
	stopOnce sync.Once
}

// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when Stop
// method is called or BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
	c := make(chan time.Time)
	t := &Ticker{
		C:     c,
		c:     c,
		b:     b,
		timer: &defaultTimer{},
		stop:  make(chan struct{}),
	}
	t.b.Reset()
	go t.run()
	return t
}

// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
	t.stopOnce.Do(func() { close(t.stop) })
}

func (t *Ticker) run() {
	c := t.c
	defer close(c)

	// Ticker is guaranteed to tick at least once.
	afterC := t.send(time.Now())

	for {
		if afterC == nil {
			return
		}

		select {
		case tick := <-afterC:
			afterC = t.send(tick)
		case <-t.stop:
			t.c = nil // Prevent future ticks from being sent to the channel.
			return
		}
	}
}

func (t *Ticker) send(tick time.Time) <-chan time.Time {
	select {
	case t.c <- tick:
	case <-t.stop:
		return nil
	}

	next := t.b.NextBackOff()
	if next == Stop {
		t.Stop()
		return nil
	}

	t.timer.Start(next)
	return t.timer.C()
}
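The Ticker is the channel-driven alternative to Retry: it fires according to the BackOff policy and the caller does the work on each tick. A minimal sketch (not part of the vendored library; the fake operation succeeding on its third attempt is a placeholder):

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready")
		}
		return nil
	}

	// The ticker fires immediately, then at exponentially increasing intervals.
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	for range ticker.C {
		if err := op(); err != nil {
			fmt.Println("retrying after error:", err)
			continue
		}
		fmt.Println("succeeded after", attempts, "attempts")
		break
	}
}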
@@ -0,0 +1,35 @@
package backoff

import "time"

type timer interface {
	Start(duration time.Duration)
	Stop()
	C() <-chan time.Time
}

// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
	timer *time.Timer
}

// C returns the timers channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
	return t.timer.C
}

// Start starts the timer to fire after the given duration
func (t *defaultTimer) Start(duration time.Duration) {
	if t.timer == nil {
		t.timer = time.NewTimer(duration)
	} else {
		t.timer.Reset(duration)
	}
}

// Stop is called when the timer is not used anymore and resources may be freed.
func (t *defaultTimer) Stop() {
	if t.timer != nil {
		t.timer.Stop()
	}
}
@@ -1,26 +1,28 @@
+version: "2"
+
 run:
   timeout: 1m
   tests: true

 linters:
-  disable-all: true
-  enable:
+  default: none
+  enable: # please keep this alphabetized
+    - asasalint
     - asciicheck
+    - copyloopvar
+    - dupl
     - errcheck
     - forcetypeassert
+    - goconst
     - gocritic
-    - gofmt
-    - goimports
-    - gosimple
     - govet
     - ineffassign
     - misspell
+    - musttag
     - revive
     - staticcheck
-    - typecheck
     - unused

 issues:
-  exclude-use-default: false
   max-issues-per-linter: 0
   max-same-issues: 10
@ -0,0 +1,914 @@
|
||||||
|
/*
|
||||||
|
Copyright 2021 The logr Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package funcr implements formatting of structured log messages and
|
||||||
|
// optionally captures the call site and timestamp.
|
||||||
|
//
|
||||||
|
// The simplest way to use it is via its implementation of a
|
||||||
|
// github.com/go-logr/logr.LogSink with output through an arbitrary
|
||||||
|
// "write" function. See New and NewJSON for details.
|
||||||
|
//
|
||||||
|
// # Custom LogSinks
|
||||||
|
//
|
||||||
|
// For users who need more control, a funcr.Formatter can be embedded inside
|
||||||
|
// your own custom LogSink implementation. This is useful when the LogSink
|
||||||
|
// needs to implement additional methods, for example.
|
||||||
|
//
|
||||||
|
// # Formatting
|
||||||
|
//
|
||||||
|
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
|
||||||
|
// values which are being logged. When rendering a struct, funcr will use Go's
|
||||||
|
// standard JSON tags (all except "string").
|
||||||
|
package funcr
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// New returns a logr.Logger which is implemented by an arbitrary function.
|
||||||
|
func New(fn func(prefix, args string), opts Options) logr.Logger {
|
||||||
|
return logr.New(newSink(fn, NewFormatter(opts)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
|
||||||
|
// and produces JSON output.
|
||||||
|
func NewJSON(fn func(obj string), opts Options) logr.Logger {
|
||||||
|
fnWrapper := func(_, obj string) {
|
||||||
|
fn(obj)
|
||||||
|
}
|
||||||
|
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Underlier exposes access to the underlying logging function. Since
|
||||||
|
// callers only have a logr.Logger, they have to know which
|
||||||
|
// implementation is in use, so this interface is less of an
|
||||||
|
// abstraction and more of a way to test type conversion.
|
||||||
|
type Underlier interface {
|
||||||
|
GetUnderlying() func(prefix, args string)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
|
||||||
|
l := &fnlogger{
|
||||||
|
Formatter: formatter,
|
||||||
|
write: fn,
|
||||||
|
}
|
||||||
|
// For skipping fnlogger.Info and fnlogger.Error.
|
||||||
|
l.AddCallDepth(1) // via Formatter
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options carries parameters which influence the way logs are generated.
|
||||||
|
type Options struct {
|
||||||
|
// LogCaller tells funcr to add a "caller" key to some or all log lines.
|
||||||
|
// This has some overhead, so some users might not want it.
|
||||||
|
LogCaller MessageClass
|
||||||
|
|
||||||
|
// LogCallerFunc tells funcr to also log the calling function name. This
|
||||||
|
// has no effect if caller logging is not enabled (see Options.LogCaller).
|
||||||
|
LogCallerFunc bool
|
||||||
|
|
||||||
|
// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
|
||||||
|
// overhead, so some users might not want it.
|
||||||
|
LogTimestamp bool
|
||||||
|
|
||||||
|
// TimestampFormat tells funcr how to render timestamps when LogTimestamp
|
||||||
|
// is enabled. If not specified, a default format will be used. For more
|
||||||
|
// details, see docs for Go's time.Layout.
|
||||||
|
TimestampFormat string
|
||||||
|
|
||||||
|
// LogInfoLevel tells funcr what key to use to log the info level.
|
||||||
|
// If not specified, the info level will be logged as "level".
|
||||||
|
// If this is set to "", the info level will not be logged at all.
|
||||||
|
LogInfoLevel *string
|
||||||
|
|
||||||
|
// Verbosity tells funcr which V logs to produce. Higher values enable
|
||||||
|
// more logs. Info logs at or below this level will be written, while logs
|
||||||
|
// above this level will be discarded.
|
||||||
|
Verbosity int
|
||||||
|
|
||||||
|
// RenderBuiltinsHook allows users to mutate the list of key-value pairs
|
||||||
|
// while a log line is being rendered. The kvList argument follows logr
|
||||||
|
// conventions - each pair of slice elements is comprised of a string key
|
||||||
|
// and an arbitrary value (verified and sanitized before calling this
|
||||||
|
// hook). The value returned must follow the same conventions. This hook
|
||||||
|
// can be used to audit or modify logged data. For example, you might want
|
||||||
|
// to prefix all of funcr's built-in keys with some string. This hook is
|
||||||
|
// only called for built-in (provided by funcr itself) key-value pairs.
|
||||||
|
// Equivalent hooks are offered for key-value pairs saved via
|
||||||
|
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
|
||||||
|
// for user-provided pairs (see RenderArgsHook).
|
||||||
|
RenderBuiltinsHook func(kvList []any) []any
|
||||||
|
|
||||||
|
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
|
||||||
|
// only called for key-value pairs saved via logr.Logger.WithValues. See
|
||||||
|
// RenderBuiltinsHook for more details.
|
||||||
|
RenderValuesHook func(kvList []any) []any
|
||||||
|
|
||||||
|
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
|
||||||
|
// called for key-value pairs passed directly to Info and Error. See
|
||||||
|
// RenderBuiltinsHook for more details.
|
||||||
|
RenderArgsHook func(kvList []any) []any
|
||||||
|
|
||||||
|
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
|
||||||
|
// that contains a struct, etc.) it may log. Every time it finds a struct,
|
||||||
|
// slice, array, or map the depth is increased by one. When the maximum is
|
||||||
|
// reached, the value will be converted to a string indicating that the max
|
||||||
|
// depth has been exceeded. If this field is not specified, a default
|
||||||
|
// value will be used.
|
||||||
|
MaxLogDepth int
|
||||||
|
}
|
||||||
|
|
||||||
|
// MessageClass indicates which category or categories of messages to consider.
|
||||||
|
type MessageClass int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// None ignores all message classes.
|
||||||
|
None MessageClass = iota
|
||||||
|
// All considers all message classes.
|
||||||
|
All
|
||||||
|
// Info only considers info messages.
|
||||||
|
Info
|
||||||
|
// Error only considers error messages.
|
||||||
|
Error
|
||||||
|
)
|
||||||
|
|
||||||
|
// fnlogger inherits some of its LogSink implementation from Formatter
|
||||||
|
// and just needs to add some glue code.
|
||||||
|
type fnlogger struct {
|
||||||
|
Formatter
|
||||||
|
write func(prefix, args string)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l fnlogger) WithName(name string) logr.LogSink {
|
||||||
|
l.AddName(name) // via Formatter
|
||||||
|
return &l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
|
||||||
|
l.AddValues(kvList) // via Formatter
|
||||||
|
return &l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
|
||||||
|
l.AddCallDepth(depth) // via Formatter
|
||||||
|
return &l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l fnlogger) Info(level int, msg string, kvList ...any) {
|
||||||
|
prefix, args := l.FormatInfo(level, msg, kvList)
|
||||||
|
l.write(prefix, args)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l fnlogger) Error(err error, msg string, kvList ...any) {
|
||||||
|
prefix, args := l.FormatError(err, msg, kvList)
|
||||||
|
l.write(prefix, args)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l fnlogger) GetUnderlying() func(prefix, args string) {
|
||||||
|
return l.write
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assert conformance to the interfaces.
|
||||||
|
var _ logr.LogSink = &fnlogger{}
|
||||||
|
var _ logr.CallDepthLogSink = &fnlogger{}
|
||||||
|
var _ Underlier = &fnlogger{}
|
||||||
|
|
||||||
|
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
|
||||||
|
func NewFormatter(opts Options) Formatter {
|
||||||
|
return newFormatter(opts, outputKeyValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFormatterJSON constructs a Formatter which emits strict JSON.
|
||||||
|
func NewFormatterJSON(opts Options) Formatter {
|
||||||
|
return newFormatter(opts, outputJSON)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Defaults for Options.
|
||||||
|
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
|
||||||
|
const defaultMaxLogDepth = 16
|
||||||
|
|
||||||
|
func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||||
|
if opts.TimestampFormat == "" {
|
||||||
|
opts.TimestampFormat = defaultTimestampFormat
|
||||||
|
}
|
||||||
|
if opts.MaxLogDepth == 0 {
|
||||||
|
opts.MaxLogDepth = defaultMaxLogDepth
|
||||||
|
}
|
||||||
|
if opts.LogInfoLevel == nil {
|
||||||
|
opts.LogInfoLevel = new(string)
|
||||||
|
*opts.LogInfoLevel = "level"
|
||||||
|
}
|
||||||
|
f := Formatter{
|
||||||
|
outputFormat: outfmt,
|
||||||
|
prefix: "",
|
||||||
|
values: nil,
|
||||||
|
depth: 0,
|
||||||
|
opts: &opts,
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// Formatter is an opaque struct which can be embedded in a LogSink
|
||||||
|
// implementation. It should be constructed with NewFormatter. Some of
|
||||||
|
// its methods directly implement logr.LogSink.
|
||||||
|
type Formatter struct {
|
||||||
|
outputFormat outputFormat
|
||||||
|
prefix string
|
||||||
|
values []any
|
||||||
|
valuesStr string
|
||||||
|
depth int
|
||||||
|
opts *Options
|
||||||
|
groupName string // for slog groups
|
||||||
|
groups []groupDef
|
||||||
|
}
|
||||||
|
|
||||||
|
// outputFormat indicates which outputFormat to use.
|
||||||
|
type outputFormat int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
|
||||||
|
outputKeyValue outputFormat = iota
|
||||||
|
// outputJSON emits strict JSON.
|
||||||
|
outputJSON
|
||||||
|
)
|
||||||
|
|
||||||
|
// groupDef represents a saved group. The values may be empty, but we don't
|
||||||
|
// know if we need to render the group until the final record is rendered.
|
||||||
|
type groupDef struct {
|
||||||
|
name string
|
||||||
|
values string
|
||||||
|
}
|
||||||
|
|
||||||
|
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
|
||||||
|
type PseudoStruct []any
|
||||||
|
|
||||||
|
// render produces a log line, ready to use.
|
||||||
|
func (f Formatter) render(builtins, args []any) string {
|
||||||
|
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
buf.WriteByte('{') // for the whole record
|
||||||
|
}
|
||||||
|
|
||||||
|
// Render builtins
|
||||||
|
vals := builtins
|
||||||
|
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||||
|
vals = hook(f.sanitize(vals))
|
||||||
|
}
|
||||||
|
f.flatten(buf, vals, false) // keys are ours, no need to escape
|
||||||
|
continuing := len(builtins) > 0
|
||||||
|
|
||||||
|
// Turn the inner-most group into a string
|
||||||
|
argsStr := func() string {
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
|
||||||
|
vals = args
|
||||||
|
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||||
|
vals = hook(f.sanitize(vals))
|
||||||
|
}
|
||||||
|
f.flatten(buf, vals, true) // escape user-provided keys
|
||||||
|
|
||||||
|
return buf.String()
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Render the stack of groups from the inside out.
|
||||||
|
bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
|
||||||
|
for i := len(f.groups) - 1; i >= 0; i-- {
|
||||||
|
grp := &f.groups[i]
|
||||||
|
if grp.values == "" && bodyStr == "" {
|
||||||
|
// no contents, so we must elide the whole group
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
if bodyStr != "" {
|
||||||
|
if continuing {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
buf.WriteString(bodyStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
buf.WriteByte('}') // for the whole record
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// renderGroup returns a string representation of the named group with rendered
|
||||||
|
// values and args. If the name is empty, this will return the values and args,
|
||||||
|
// joined. If the name is not empty, this will return a single key-value pair,
|
||||||
|
// where the value is a grouping of the values and args. If the values and
|
||||||
|
// args are both empty, this will return an empty string, even if the name was
|
||||||
|
// specified.
|
||||||
|
func (f Formatter) renderGroup(name string, values string, args string) string {
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
|
||||||
|
needClosingBrace := false
|
||||||
|
if name != "" && (values != "" || args != "") {
|
||||||
|
buf.WriteString(f.quoted(name, true)) // escape user-provided keys
|
||||||
|
buf.WriteByte(f.colon())
|
||||||
|
buf.WriteByte('{')
|
||||||
|
needClosingBrace = true
|
||||||
|
}
|
||||||
|
|
||||||
|
continuing := false
|
||||||
|
if values != "" {
|
||||||
|
buf.WriteString(values)
|
||||||
|
continuing = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if args != "" {
|
||||||
|
if continuing {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
buf.WriteString(args)
|
||||||
|
}
|
||||||
|
|
||||||
|
if needClosingBrace {
|
||||||
|
buf.WriteByte('}')
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
|
||||||
|
// true, the keys are assumed to have non-JSON-compatible characters in them
|
||||||
|
// and must be evaluated for escapes.
|
||||||
|
//
|
||||||
|
// This function returns a potentially modified version of kvList, which
|
||||||
|
// ensures that there is a value for every key (adding a value if needed) and
|
||||||
|
// that each key is a string (substituting a key if needed).
|
||||||
|
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
|
||||||
|
// This logic overlaps with sanitize() but saves one type-cast per key,
|
||||||
|
// which can be measurable.
|
||||||
|
if len(kvList)%2 != 0 {
|
||||||
|
kvList = append(kvList, noValue)
|
||||||
|
}
|
||||||
|
copied := false
|
||||||
|
for i := 0; i < len(kvList); i += 2 {
|
||||||
|
k, ok := kvList[i].(string)
|
||||||
|
if !ok {
|
||||||
|
if !copied {
|
||||||
|
newList := make([]any, len(kvList))
|
||||||
|
copy(newList, kvList)
|
||||||
|
kvList = newList
|
||||||
|
copied = true
|
||||||
|
}
|
||||||
|
k = f.nonStringKey(kvList[i])
|
||||||
|
kvList[i] = k
|
||||||
|
}
|
||||||
|
v := kvList[i+1]
|
||||||
|
|
||||||
|
if i > 0 {
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
} else {
|
||||||
|
// In theory the format could be something we don't understand. In
|
||||||
|
// practice, we control it, so it won't be.
|
||||||
|
buf.WriteByte(' ')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString(f.quoted(k, escapeKeys))
|
||||||
|
buf.WriteByte(f.colon())
|
||||||
|
buf.WriteString(f.pretty(v))
|
||||||
|
}
|
||||||
|
return kvList
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Formatter) quoted(str string, escape bool) string {
|
||||||
|
if escape {
|
||||||
|
return prettyString(str)
|
||||||
|
}
|
||||||
|
// this is faster
|
||||||
|
return `"` + str + `"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Formatter) comma() byte {
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
return ','
|
||||||
|
}
|
||||||
|
return ' '
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Formatter) colon() byte {
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
return ':'
|
||||||
|
}
|
||||||
|
return '='
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f Formatter) pretty(value any) string {
|
||||||
|
return f.prettyWithFlags(value, 0, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
flagRawStruct = 0x1 // do not print braces on structs
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: This is not fast. Most of the overhead goes here.
|
||||||
|
func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||||
|
if depth > f.opts.MaxLogDepth {
|
||||||
|
return `"<max-log-depth-exceeded>"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle types that take full control of logging.
|
||||||
|
if v, ok := value.(logr.Marshaler); ok {
|
||||||
|
// Replace the value with what the type wants to get logged.
|
||||||
|
// That then gets handled below via reflection.
|
||||||
|
value = invokeMarshaler(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle types that want to format themselves.
|
||||||
|
switch v := value.(type) {
|
||||||
|
case fmt.Stringer:
|
||||||
|
value = invokeStringer(v)
|
||||||
|
case error:
|
||||||
|
value = invokeError(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handling the most common types without reflect is a small perf win.
|
||||||
|
switch v := value.(type) {
|
||||||
|
case bool:
|
||||||
|
return strconv.FormatBool(v)
|
||||||
|
case string:
|
||||||
|
return prettyString(v)
|
||||||
|
case int:
|
||||||
|
return strconv.FormatInt(int64(v), 10)
|
||||||
|
case int8:
|
||||||
|
return strconv.FormatInt(int64(v), 10)
|
||||||
|
case int16:
|
||||||
|
return strconv.FormatInt(int64(v), 10)
|
||||||
|
case int32:
|
||||||
|
return strconv.FormatInt(int64(v), 10)
|
||||||
|
case int64:
|
||||||
|
return strconv.FormatInt(int64(v), 10)
|
||||||
|
case uint:
|
||||||
|
return strconv.FormatUint(uint64(v), 10)
|
||||||
|
case uint8:
|
||||||
|
return strconv.FormatUint(uint64(v), 10)
|
||||||
|
case uint16:
|
||||||
|
return strconv.FormatUint(uint64(v), 10)
|
||||||
|
case uint32:
|
||||||
|
return strconv.FormatUint(uint64(v), 10)
|
||||||
|
case uint64:
|
||||||
|
return strconv.FormatUint(v, 10)
|
||||||
|
case uintptr:
|
||||||
|
return strconv.FormatUint(uint64(v), 10)
|
||||||
|
case float32:
|
||||||
|
return strconv.FormatFloat(float64(v), 'f', -1, 32)
|
||||||
|
case float64:
|
||||||
|
return strconv.FormatFloat(v, 'f', -1, 64)
|
||||||
|
case complex64:
|
||||||
|
return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
|
||||||
|
case complex128:
|
||||||
|
return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
|
||||||
|
case PseudoStruct:
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
v = f.sanitize(v)
|
||||||
|
if flags&flagRawStruct == 0 {
|
||||||
|
buf.WriteByte('{')
|
||||||
|
}
|
||||||
|
for i := 0; i < len(v); i += 2 {
|
||||||
|
if i > 0 {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
k, _ := v[i].(string) // sanitize() above means no need to check success
|
||||||
|
// arbitrary keys might need escaping
|
||||||
|
buf.WriteString(prettyString(k))
|
||||||
|
buf.WriteByte(f.colon())
|
||||||
|
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
||||||
|
}
|
||||||
|
if flags&flagRawStruct == 0 {
|
||||||
|
buf.WriteByte('}')
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||||
|
t := reflect.TypeOf(value)
|
||||||
|
if t == nil {
|
||||||
|
return "null"
|
||||||
|
}
|
||||||
|
v := reflect.ValueOf(value)
|
||||||
|
switch t.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
return strconv.FormatBool(v.Bool())
|
||||||
|
case reflect.String:
|
||||||
|
return prettyString(v.String())
|
||||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||||
|
return strconv.FormatInt(int64(v.Int()), 10)
|
||||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||||
|
return strconv.FormatUint(uint64(v.Uint()), 10)
|
||||||
|
case reflect.Float32:
|
||||||
|
return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
|
||||||
|
case reflect.Float64:
|
||||||
|
return strconv.FormatFloat(v.Float(), 'f', -1, 64)
|
||||||
|
case reflect.Complex64:
|
||||||
|
return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
|
||||||
|
case reflect.Complex128:
|
||||||
|
return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
|
||||||
|
case reflect.Struct:
|
||||||
|
if flags&flagRawStruct == 0 {
|
||||||
|
buf.WriteByte('{')
|
||||||
|
}
|
||||||
|
printComma := false // testing i>0 is not enough because of JSON omitted fields
|
||||||
|
for i := 0; i < t.NumField(); i++ {
|
||||||
|
fld := t.Field(i)
|
||||||
|
if fld.PkgPath != "" {
|
||||||
|
// reflect says this field is only defined for non-exported fields.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !v.Field(i).CanInterface() {
|
||||||
|
// reflect isn't clear exactly what this means, but we can't use it.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := ""
|
||||||
|
omitempty := false
|
||||||
|
if tag, found := fld.Tag.Lookup("json"); found {
|
||||||
|
if tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if comma := strings.Index(tag, ","); comma != -1 {
|
||||||
|
if n := tag[:comma]; n != "" {
|
||||||
|
name = n
|
||||||
|
}
|
||||||
|
rest := tag[comma:]
|
||||||
|
if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
|
||||||
|
omitempty = true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
name = tag
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if omitempty && isEmpty(v.Field(i)) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if printComma {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
printComma = true // if we got here, we are rendering a field
|
||||||
|
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||||
|
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if name == "" {
|
||||||
|
name = fld.Name
|
||||||
|
}
|
||||||
|
// field names can't contain characters which need escaping
|
||||||
|
buf.WriteString(f.quoted(name, false))
|
||||||
|
buf.WriteByte(f.colon())
|
||||||
|
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
||||||
|
}
|
||||||
|
if flags&flagRawStruct == 0 {
|
||||||
|
buf.WriteByte('}')
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
case reflect.Slice, reflect.Array:
|
||||||
|
// If this is outputing as JSON make sure this isn't really a json.RawMessage.
|
||||||
|
// If so just emit "as-is" and don't pretty it as that will just print
|
||||||
|
// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
if rm, ok := value.(json.RawMessage); ok {
|
||||||
|
// If it's empty make sure we emit an empty value as the array style would below.
|
||||||
|
if len(rm) > 0 {
|
||||||
|
buf.Write(rm)
|
||||||
|
} else {
|
||||||
|
buf.WriteString("null")
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
buf.WriteByte('[')
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
if i > 0 {
|
||||||
|
buf.WriteByte(f.comma())
|
||||||
|
}
|
||||||
|
e := v.Index(i)
|
||||||
|
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
||||||
|
}
|
||||||
|
buf.WriteByte(']')
|
||||||
|
return buf.String()
|
	case reflect.Map:
		buf.WriteByte('{')
		// This does not sort the map keys, for best perf.
		it := v.MapRange()
		i := 0
		for it.Next() {
			if i > 0 {
				buf.WriteByte(f.comma())
			}
			// If a map key supports TextMarshaler, use it.
			keystr := ""
			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
				txt, err := m.MarshalText()
				if err != nil {
					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
				} else {
					keystr = string(txt)
				}
				keystr = prettyString(keystr)
			} else {
				// prettyWithFlags will produce already-escaped values
				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
				if t.Key().Kind() != reflect.String {
					// JSON only does string keys. Unlike Go's standard JSON, we'll
					// convert just about anything to a string.
					keystr = prettyString(keystr)
				}
			}
			buf.WriteString(keystr)
			buf.WriteByte(f.colon())
			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
			i++
		}
		buf.WriteByte('}')
		return buf.String()
	case reflect.Ptr, reflect.Interface:
		if v.IsNil() {
			return "null"
		}
		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
	}
	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}

func prettyString(s string) string {
	// Avoid escaping (which does allocations) if we can.
	if needsEscape(s) {
		return strconv.Quote(s)
	}
	b := bytes.NewBuffer(make([]byte, 0, 1024))
	b.WriteByte('"')
	b.WriteString(s)
	b.WriteByte('"')
	return b.String()
}

// needsEscape determines whether the input string needs to be escaped or not,
// without doing any allocations.
func needsEscape(s string) bool {
	for _, r := range s {
		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
			return true
		}
	}
	return false
}

func isEmpty(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Complex64, reflect.Complex128:
		return v.Complex() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	}
	return false
}

func invokeMarshaler(m logr.Marshaler) (ret any) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return m.MarshalLog()
}

func invokeStringer(s fmt.Stringer) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return s.String()
}

func invokeError(e error) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return e.Error()
}

// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.
// Users can set the render hook fields in Options to examine logged key-value
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
// field is enabled for the given MessageClass.
type Caller struct {
	// File is the basename of the file for this call site.
	File string `json:"file"`
	// Line is the line number in the file for this call site.
	Line int `json:"line"`
	// Func is the function name for this call site, or empty if
	// Options.LogCallerFunc is not enabled.
	Func string `json:"function,omitempty"`
}

func (f Formatter) caller() Caller {
	// +1 for this frame, +1 for Info/Error.
	pc, file, line, ok := runtime.Caller(f.depth + 2)
	if !ok {
		return Caller{"<unknown>", 0, ""}
	}
	fn := ""
	if f.opts.LogCallerFunc {
		if fp := runtime.FuncForPC(pc); fp != nil {
			fn = fp.Name()
		}
	}

	return Caller{filepath.Base(file), line, fn}
}

const noValue = "<no-value>"

func (f Formatter) nonStringKey(v any) string {
	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}

// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v any) string {
	const snipLen = 16

	snip := f.pretty(v)
	if len(snip) > snipLen {
		snip = snip[:snipLen]
	}
	return snip
}

// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
func (f Formatter) sanitize(kvList []any) []any {
	if len(kvList)%2 != 0 {
		kvList = append(kvList, noValue)
	}
	for i := 0; i < len(kvList); i += 2 {
		_, ok := kvList[i].(string)
		if !ok {
			kvList[i] = f.nonStringKey(kvList[i])
		}
	}
	return kvList
}

// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
func (f *Formatter) startGroup(name string) {
	// Unnamed groups are just inlined.
	if name == "" {
		return
	}

	n := len(f.groups)
	f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})

	// Start collecting new values.
	f.groupName = name
	f.valuesStr = ""
	f.values = nil
}

// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
func (f *Formatter) Init(info logr.RuntimeInfo) {
	f.depth += info.CallDepth
}

// Enabled checks whether an info message at the given level should be logged.
func (f Formatter) Enabled(level int) bool {
	return level <= f.opts.Verbosity
}

// GetDepth returns the current depth of this Formatter. This is useful for
// implementations which do their own caller attribution.
func (f Formatter) GetDepth() int {
	return f.depth
}

// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
	args := make([]any, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		args = append(args, "logger", prefix)
		prefix = ""
	}
	if f.opts.LogTimestamp {
		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
	}
	if policy := f.opts.LogCaller; policy == All || policy == Info {
		args = append(args, "caller", f.caller())
	}
	if key := *f.opts.LogInfoLevel; key != "" {
		args = append(args, key, level)
	}
	args = append(args, "msg", msg)
	return prefix, f.render(args, kvList)
}

// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
	args := make([]any, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		args = append(args, "logger", prefix)
		prefix = ""
	}
	if f.opts.LogTimestamp {
		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
	}
	if policy := f.opts.LogCaller; policy == All || policy == Error {
		args = append(args, "caller", f.caller())
	}
	args = append(args, "msg", msg)
	var loggableErr any
	if err != nil {
		loggableErr = err.Error()
	}
	args = append(args, "error", loggableErr)
	return prefix, f.render(args, kvList)
}

// AddName appends the specified name. funcr uses '/' characters to separate
// name elements. Callers should not pass '/' in the provided name string, but
// this library does not actually enforce that.
func (f *Formatter) AddName(name string) {
	if len(f.prefix) > 0 {
		f.prefix += "/"
	}
	f.prefix += name
}

// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []any) {
	// Three slice args forces a copy.
	n := len(f.values)
	f.values = append(f.values[:n:n], kvList...)

	vals := f.values
	if hook := f.opts.RenderValuesHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}

	// Pre-render values, so we don't have to do it on each Info/Error call.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	f.flatten(buf, vals, true) // escape user-provided keys
	f.valuesStr = buf.String()
}

// AddCallDepth increases the number of stack-frames to skip when attributing
// the log line to a file and line.
func (f *Formatter) AddCallDepth(depth int) {
	f.depth += depth
}
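For orientation, a minimal sketch of how this Formatter is normally driven through funcr's public constructor: funcr.New wraps a print callback in a logr.Logger, and the Options fields shown here feed the FormatInfo/FormatError paths above. The output sink (stdout) and option choices are illustrative.

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr.New turns a print callback into a logr.Logger backed by Formatter.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{
		LogCaller:    funcr.All, // adds a "caller" key via Formatter.caller()
		LogTimestamp: true,      // adds a "ts" key in FormatInfo/FormatError
	})

	log = log.WithName("example").WithValues("component", "demo")
	log.Info("hello", "count", 42)
	log.Error(nil, "something odd", "key", "value")
}
```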
@ -0,0 +1,105 @@
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package funcr

import (
	"context"
	"log/slog"

	"github.com/go-logr/logr"
)

var _ logr.SlogSink = &fnlogger{}

const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink

func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
	kvList := make([]any, 0, 2*record.NumAttrs())
	record.Attrs(func(attr slog.Attr) bool {
		kvList = attrToKVs(attr, kvList)
		return true
	})

	if record.Level >= slog.LevelError {
		l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
	} else {
		level := l.levelFromSlog(record.Level)
		l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
	}
	return nil
}

func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
	kvList := make([]any, 0, 2*len(attrs))
	for _, attr := range attrs {
		kvList = attrToKVs(attr, kvList)
	}
	l.AddValues(kvList)
	return &l
}

func (l fnlogger) WithGroup(name string) logr.SlogSink {
	l.startGroup(name)
	return &l
}

// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, kvList []any) []any {
	attrVal := attr.Value.Resolve()
	if attrVal.Kind() == slog.KindGroup {
		groupVal := attrVal.Group()
		grpKVs := make([]any, 0, 2*len(groupVal))
		for _, attr := range groupVal {
			grpKVs = attrToKVs(attr, grpKVs)
		}
		if attr.Key == "" {
			// slog says we have to inline these.
			kvList = append(kvList, grpKVs...)
		} else {
			kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
		}
	} else if attr.Key != "" {
		kvList = append(kvList, attr.Key, attrVal.Any())
	}

	return kvList
}

// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
//	logrV0 := getMyLogger()
//	logrV2 := logrV0.V(2)
//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
//	slogV2.Info("msg")  // =~ logrV2.V(0) =~ logrV0.V(2)
//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l fnlogger) levelFromSlog(level slog.Level) int {
	result := -level
	if result < 0 {
		result = 0 // because LogSink doesn't expect negative V levels
	}
	return int(result)
}
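A small sketch of the slog bridge described in the comments above, assuming a logr version that exports logr.ToSlogHandler (v1.4+); the funcr sink and verbosity value are illustrative.

```go
package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// A funcr-backed logr.Logger that accepts V levels up to 2.
	base := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 2})

	slogger := slog.New(logr.ToSlogHandler(base))

	slogger.Info("visible")  // slog Info (0)  -> logr V(0), logged
	slogger.Warn("visible")  // slog Warn (4)  -> clamped to V(0), logged
	slogger.Debug("dropped") // slog Debug (-4) -> logr V(4) > Verbosity 2, dropped
}
```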
@ -0,0 +1,201 @@
[Apache License, Version 2.0 — standard license text, added verbatim as a vendored LICENSE file.]
@ -0,0 +1,6 @@
# Minimal Go logging using logr and Go's standard library

[Go Reference](https://pkg.go.dev/github.com/go-logr/stdr)

This package implements the [logr interface](https://github.com/go-logr/logr)
in terms of Go's standard [log package](https://pkg.go.dev/log).
@ -0,0 +1,170 @@
/*
Copyright 2019 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package stdr implements github.com/go-logr/logr.Logger in terms of
// Go's standard log package.
package stdr

import (
	"log"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

// The global verbosity level. See SetVerbosity().
var globalVerbosity int

// SetVerbosity sets the global level against which all info logs will be
// compared. If this is greater than or equal to the "V" of the logger, the
// message will be logged. A higher value here means more logs will be written.
// The previous verbosity value is returned. This is not concurrent-safe -
// callers must be sure to call it from only one goroutine.
func SetVerbosity(v int) int {
	old := globalVerbosity
	globalVerbosity = v
	return old
}

// New returns a logr.Logger which is implemented by Go's standard log package,
// or something like it. If std is nil, this will use a default logger
// instead.
//
// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
func New(std StdLogger) logr.Logger {
	return NewWithOptions(std, Options{})
}

// NewWithOptions returns a logr.Logger which is implemented by Go's standard
// log package, or something like it. See New for details.
func NewWithOptions(std StdLogger, opts Options) logr.Logger {
	if std == nil {
		// Go's log.Default() is only available in 1.16 and higher.
		std = log.New(os.Stderr, "", log.LstdFlags)
	}

	if opts.Depth < 0 {
		opts.Depth = 0
	}

	fopts := funcr.Options{
		LogCaller: funcr.MessageClass(opts.LogCaller),
	}

	sl := &logger{
		Formatter: funcr.NewFormatter(fopts),
		std:       std,
	}

	// For skipping our own logger.Info/Error.
	sl.Formatter.AddCallDepth(1 + opts.Depth)

	return logr.New(sl)
}

// Options carries parameters which influence the way logs are generated.
type Options struct {
	// Depth biases the assumed number of call frames to the "true" caller.
	// This is useful when the calling code calls a function which then calls
	// stdr (e.g. a logging shim to another API). Values less than zero will
	// be treated as zero.
	Depth int

	// LogCaller tells stdr to add a "caller" key to some or all log lines.
	// Go's log package has options to log this natively, too.
	LogCaller MessageClass

	// TODO: add an option to log the date/time
}

// MessageClass indicates which category or categories of messages to consider.
type MessageClass int

const (
	// None ignores all message classes.
	None MessageClass = iota
	// All considers all message classes.
	All
	// Info only considers info messages.
	Info
	// Error only considers error messages.
	Error
)

// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
// this adapter.
type StdLogger interface {
	// Output is the same as log.Output and log.Logger.Output.
	Output(calldepth int, logline string) error
}

type logger struct {
	funcr.Formatter
	std StdLogger
}

var _ logr.LogSink = &logger{}
var _ logr.CallDepthLogSink = &logger{}

func (l logger) Enabled(level int) bool {
	return globalVerbosity >= level
}

func (l logger) Info(level int, msg string, kvList ...interface{}) {
	prefix, args := l.FormatInfo(level, msg, kvList)
	if prefix != "" {
		args = prefix + ": " + args
	}
	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}

func (l logger) Error(err error, msg string, kvList ...interface{}) {
	prefix, args := l.FormatError(err, msg, kvList)
	if prefix != "" {
		args = prefix + ": " + args
	}
	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}

func (l logger) WithName(name string) logr.LogSink {
	l.Formatter.AddName(name)
	return &l
}

func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
	l.Formatter.AddValues(kvList)
	return &l
}

func (l logger) WithCallDepth(depth int) logr.LogSink {
	l.Formatter.AddCallDepth(depth)
	return &l
}

// Underlier exposes access to the underlying logging implementation. Since
// callers only have a logr.Logger, they have to know which implementation is
// in use, so this interface is less of an abstraction and more of a way to test
// type conversion.
type Underlier interface {
	GetUnderlying() StdLogger
}

// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
// is itself an interface, the result may or may not be a Go log.Logger.
func (l logger) GetUnderlying() StdLogger {
	return l.std
}
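A minimal usage sketch of the stdr API defined above; the flag choices and log messages are illustrative.

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	stdr.SetVerbosity(1) // allow V(0) and V(1) info logs globally

	logger := stdr.NewWithOptions(
		log.New(os.Stderr, "", log.LstdFlags),
		stdr.Options{LogCaller: stdr.All},
	)
	logger = logger.WithName("example")

	logger.Info("starting", "port", 8080)
	logger.V(1).Info("detail", "step", 1)
	logger.V(2).Info("dropped", "reason", "verbosity too low")
	logger.Error(os.ErrNotExist, "lookup failed", "path", "/tmp/missing")
}
```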
@ -148,10 +148,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
|
||||||
}
|
}
|
||||||
|
|
||||||
md, ok := ServerMetadataFromContext(ctx)
|
md, ok := ServerMetadataFromContext(ctx)
|
||||||
if !ok {
|
if ok {
|
||||||
grpclog.Error("Failed to extract ServerMetadata from context")
|
|
||||||
}
|
|
||||||
|
|
||||||
handleForwardResponseServerMetadata(w, mux, md)
|
handleForwardResponseServerMetadata(w, mux, md)
|
||||||
|
|
||||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||||
|
|
@ -165,6 +162,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
|
||||||
handleForwardResponseTrailerHeader(w, mux, md)
|
handleForwardResponseTrailerHeader(w, mux, md)
|
||||||
w.Header().Set("Transfer-Encoding", "chunked")
|
w.Header().Set("Transfer-Encoding", "chunked")
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
st := HTTPStatusFromCode(s.Code())
|
st := HTTPStatusFromCode(s.Code())
|
||||||
if customStatus != nil {
|
if customStatus != nil {
|
||||||
|
|
@ -176,7 +174,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
|
||||||
grpclog.Errorf("Failed to write response: %v", err)
|
grpclog.Errorf("Failed to write response: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if doForwardTrailers {
|
if ok && requestAcceptsTrailers(r) {
|
||||||
handleForwardResponseTrailer(w, mux, md)
|
handleForwardResponseTrailer(w, mux, md)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -153,11 +153,9 @@ type responseBody interface {
|
||||||
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
||||||
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||||
md, ok := ServerMetadataFromContext(ctx)
|
md, ok := ServerMetadataFromContext(ctx)
|
||||||
if !ok {
|
if ok {
|
||||||
grpclog.Error("Failed to extract ServerMetadata from context")
|
|
||||||
}
|
|
||||||
|
|
||||||
handleForwardResponseServerMetadata(w, mux, md)
|
handleForwardResponseServerMetadata(w, mux, md)
|
||||||
|
}
|
||||||
|
|
||||||
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
|
||||||
// Unless the request includes a TE header field indicating "trailers"
|
// Unless the request includes a TE header field indicating "trailers"
|
||||||
|
|
@ -166,7 +164,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
|
||||||
// agent to receive.
|
// agent to receive.
|
||||||
doForwardTrailers := requestAcceptsTrailers(req)
|
doForwardTrailers := requestAcceptsTrailers(req)
|
||||||
|
|
||||||
if doForwardTrailers {
|
if ok && doForwardTrailers {
|
||||||
handleForwardResponseTrailerHeader(w, mux, md)
|
handleForwardResponseTrailerHeader(w, mux, md)
|
||||||
w.Header().Set("Transfer-Encoding", "chunked")
|
w.Header().Set("Transfer-Encoding", "chunked")
|
||||||
}
|
}
|
||||||
|
|
@ -204,7 +202,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
|
||||||
grpclog.Errorf("Failed to write response: %v", err)
|
grpclog.Errorf("Failed to write response: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if doForwardTrailers {
|
if ok && doForwardTrailers {
|
||||||
handleForwardResponseTrailer(w, mux, md)
|
handleForwardResponseTrailer(w, mux, md)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn {
|
||||||
}
|
}
|
||||||
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
|
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
|
||||||
// labels to 'real' labels.
|
// labels to 'real' labels.
|
||||||
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
|
if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
|
||||||
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
|
(p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
|
||||||
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
|
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
|
||||||
}
|
}
|
||||||
// Check for duplicate label names.
|
// Check for duplicate label names.
|
||||||
|
|
|
||||||
|
|
@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool {
 	return a.ResolvedAt(time.Now())
 }
 
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
 // the given timestamp.
 func (a *Alert) ResolvedAt(ts time.Time) bool {
 	if a.EndsAt.IsZero() {
||||||
|
|
@ -22,7 +22,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// AlertNameLabel is the name of the label containing the an alert's name.
|
// AlertNameLabel is the name of the label containing the alert's name.
|
||||||
AlertNameLabel = "alertname"
|
AlertNameLabel = "alertname"
|
||||||
|
|
||||||
// ExportedLabelPrefix is the prefix to prepend to the label names present in
|
// ExportedLabelPrefix is the prefix to prepend to the label names present in
|
||||||
|
|
@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
for i, b := range ln {
|
for i, b := range ln {
|
||||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
|
// TODO: Apply De Morgan's law. Make sure there are tests for this.
|
||||||
|
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -27,13 +27,25 @@ import (
 )
 
 var (
-	// NameValidationScheme determines the method of name validation to be used by
-	// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
-	// mode in isolation from other components that don't support UTF-8 may result
-	// in bugs or other undefined behavior. This value can be set to
-	// LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
-	// avoid need for locking, this value should be set once, ideally in an
-	// init(), before multiple goroutines are started.
+	// NameValidationScheme determines the global default method of the name
+	// validation to be used by all calls to IsValidMetricName() and LabelName
+	// IsValid().
+	//
+	// Deprecated: This variable should not be used and might be removed in the
+	// far future. If you wish to stick to the legacy name validation use
+	// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+	// instead. This variable is here as an escape hatch for emergency cases,
+	// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+	// to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+	// the change. In such a case, a temporary assignment to `LegacyValidation`
+	// value in the `init()` function in your main.go or so, could be considered.
+	//
+	// Historically we opted for a global variable for feature gating different
+	// validation schemes in operations that were not otherwise easily adjustable
+	// (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
+	// Labels structure or package might have been a better choice. Given the
+	// change was made and many upgraded the common already, we live this as-is
+	// with this warning and learning for the future.
 	NameValidationScheme = UTF8Validation
 
 	// NameEscapingScheme defines the default way that names will be escaped when
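As the deprecation note above describes, the escape hatch is a one-time assignment in init(). A minimal sketch, assuming the package path github.com/prometheus/common/model from context:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func init() {
	// Temporarily opt back into pre-UTF-8 behavior, as the comment suggests.
	// Set once, before any goroutine uses the model package.
	model.NameValidationScheme = model.LegacyValidation
}

func main() {
	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("http.requests.total")) // false under LegacyValidation
}
```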
|
@ -50,7 +62,7 @@ var (
|
||||||
type ValidationScheme int
|
type ValidationScheme int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// LegacyValidation is a setting that requirets that metric and label names
|
// LegacyValidation is a setting that requires that all metric and label names
|
||||||
// conform to the original Prometheus character requirements described by
|
// conform to the original Prometheus character requirements described by
|
||||||
// MetricNameRE and LabelNameRE.
|
// MetricNameRE and LabelNameRE.
|
||||||
LegacyValidation ValidationScheme = iota
|
LegacyValidation ValidationScheme = iota
|
||||||
|
|
|
||||||
|
|
@ -201,6 +201,7 @@ var unitMap = map[string]struct {
|
||||||
|
|
||||||
// ParseDuration parses a string into a time.Duration, assuming that a year
|
// ParseDuration parses a string into a time.Duration, assuming that a year
|
||||||
// always has 365d, a week always has 7d, and a day always has 24h.
|
// always has 365d, a week always has 7d, and a day always has 24h.
|
||||||
|
// Negative durations are not supported.
|
||||||
func ParseDuration(s string) (Duration, error) {
|
func ParseDuration(s string) (Duration, error) {
|
||||||
switch s {
|
switch s {
|
||||||
case "0":
|
case "0":
|
||||||
|
|
@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) {
 			return 0, errors.New("duration out of range")
 		}
 	}
 
 	return Duration(dur), nil
 }
 
+// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
+func ParseDurationAllowNegative(s string) (Duration, error) {
+	if s == "" || s[0] != '-' {
+		return ParseDuration(s)
+	}
+
+	d, err := ParseDuration(s[1:])
+
+	return -d, err
+}
+
 func (d Duration) String() string {
 	var (
 		ms   = int64(time.Duration(d) / time.Millisecond)
 		r    = ""
+		sign = ""
 	)
 
 	if ms == 0 {
 		return "0s"
 	}
 
+	if ms < 0 {
+		sign, ms = "-", -ms
+	}
+
 	f := func(unit string, mult int64, exact bool) {
 		if exact && ms%mult != 0 {
 			return
|
@ -286,7 +305,7 @@ func (d Duration) String() string {
 	f("s", 1000, false)
 	f("ms", 1, false)
 
-	return r
+	return sign + r
 }
 
 // MarshalJSON implements the json.Marshaler interface.
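A short sketch of the negative-duration support added above, assuming the package path github.com/prometheus/common/model:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // "-1h30m", via the sign handling added to String()

	// Plain ParseDuration still rejects a leading minus.
	if _, err := model.ParseDuration("-1h30m"); err != nil {
		fmt.Println("ParseDuration:", err)
	}
}
```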
|
||||||
|
|
@ -1,22 +1,45 @@
|
||||||
---
|
version: "2"
|
||||||
linters:
|
linters:
|
||||||
enable:
|
enable:
|
||||||
- errcheck
|
- forbidigo
|
||||||
- godot
|
- godot
|
||||||
- gosimple
|
|
||||||
- govet
|
|
||||||
- ineffassign
|
|
||||||
- misspell
|
- misspell
|
||||||
- revive
|
- revive
|
||||||
- staticcheck
|
|
||||||
- testifylint
|
- testifylint
|
||||||
- unused
|
settings:
|
||||||
|
forbidigo:
|
||||||
linter-settings:
|
forbid:
|
||||||
|
- pattern: ^fmt\.Print.*$
|
||||||
|
msg: Do not commit print statements.
|
||||||
godot:
|
godot:
|
||||||
capital: true
|
|
||||||
exclude:
|
exclude:
|
||||||
# Ignore "See: URL"
|
# Ignore "See: URL".
|
||||||
- 'See:'
|
- 'See:'
|
||||||
|
capital: true
|
||||||
misspell:
|
misspell:
|
||||||
locale: US
|
locale: US
|
||||||
|
exclusions:
|
||||||
|
generated: lax
|
||||||
|
presets:
|
||||||
|
- comments
|
||||||
|
- common-false-positives
|
||||||
|
- legacy
|
||||||
|
- std-error-handling
|
||||||
|
paths:
|
||||||
|
- third_party$
|
||||||
|
- builtin$
|
||||||
|
- examples$
|
||||||
|
formatters:
|
||||||
|
enable:
|
||||||
|
- gofmt
|
||||||
|
- goimports
|
||||||
|
settings:
|
||||||
|
goimports:
|
||||||
|
local-prefixes:
|
||||||
|
- github.com/prometheus/procfs
|
||||||
|
exclusions:
|
||||||
|
generated: lax
|
||||||
|
paths:
|
||||||
|
- third_party$
|
||||||
|
- builtin$
|
||||||
|
- examples$
|
||||||
|
|
|
||||||
|
|
@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
|
||||||
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
|
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
|
||||||
|
|
||||||
GO_VERSION ?= $(shell $(GO) version)
|
GO_VERSION ?= $(shell $(GO) version)
|
||||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))Error Parsing File
|
||||||
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
|
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
|
||||||
|
|
||||||
PROMU := $(FIRST_GOPATH)/bin/promu
|
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||||
|
|
@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
||||||
SKIP_GOLANGCI_LINT :=
|
SKIP_GOLANGCI_LINT :=
|
||||||
GOLANGCI_LINT :=
|
GOLANGCI_LINT :=
|
||||||
GOLANGCI_LINT_OPTS ?=
|
GOLANGCI_LINT_OPTS ?=
|
||||||
GOLANGCI_LINT_VERSION ?= v1.59.0
|
GOLANGCI_LINT_VERSION ?= v2.0.2
|
||||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||||
# windows isn't included here because of the path separator being different.
|
# windows isn't included here because of the path separator being different.
|
||||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||||
|
|
@ -275,3 +275,9 @@ $(1)_precheck:
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi
|
fi
|
||||||
endef
|
endef
|
||||||
|
|
||||||
|
govulncheck: install-govulncheck
|
||||||
|
govulncheck ./...
|
||||||
|
|
||||||
|
install-govulncheck:
|
||||||
|
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|
|
||||||
|
|
@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
|
||||||
The procfs library includes a set of test fixtures which include many example files from
|
The procfs library includes a set of test fixtures which include many example files from
|
||||||
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
|
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
|
||||||
which is extracted automatically during testing. To add/update the test fixtures, first
|
which is extracted automatically during testing. To add/update the test fixtures, first
|
||||||
ensure the `fixtures` directory is up to date by removing the existing directory and then
|
ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
|
||||||
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
|
extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
rm -rf testdata/fixtures
|
rm -rf testdata/fixtures
|
||||||
make test
|
make test
|
||||||
```
|
```
|
||||||
|
|
||||||
Next, make the required changes to the extracted files in the `fixtures` directory. When
|
Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
|
||||||
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
|
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
|
||||||
based on the updated `fixtures` directory. And finally, verify the changes using
|
based on the updated `fixtures` directory. And finally, verify the changes using
|
||||||
`git diff testdata/fixtures.ttar`.
|
`git diff testdata/fixtures.ttar`.
|
||||||
|
|
|
||||||
|
|
@ -23,9 +23,9 @@ import (
|
||||||
|
|
||||||
// Learned from include/uapi/linux/if_arp.h.
|
// Learned from include/uapi/linux/if_arp.h.
|
||||||
const (
|
const (
|
||||||
// completed entry (ha valid).
|
// Completed entry (ha valid).
|
||||||
ATFComplete = 0x02
|
ATFComplete = 0x02
|
||||||
// permanent entry.
|
// Permanent entry.
|
||||||
ATFPermanent = 0x04
|
ATFPermanent = 0x04
|
||||||
// Publish entry.
|
// Publish entry.
|
||||||
ATFPublish = 0x08
|
ATFPublish = 0x08
|
||||||
|
|
|
||||||
|
|
@ -24,8 +24,14 @@ type FS struct {
|
||||||
isReal bool
|
isReal bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultMountPoint is the common mount point of the proc filesystem.
|
const (
|
||||||
const DefaultMountPoint = fs.DefaultProcMountPoint
|
// DefaultMountPoint is the common mount point of the proc filesystem.
|
||||||
|
DefaultMountPoint = fs.DefaultProcMountPoint
|
||||||
|
|
||||||
|
// SectorSize represents the size of a sector in bytes.
|
||||||
|
// It is specific to Linux block I/O operations.
|
||||||
|
SectorSize = 512
|
||||||
|
)
|
||||||
|
|
||||||
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
|
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
|
||||||
// It will error if the mount point directory can't be read or is a file.
|
// It will error if the mount point directory can't be read or is a file.
|
||||||
|
|
|
||||||
|
|
@ -17,7 +17,7 @@
|
||||||
package procfs
|
package procfs
|
||||||
|
|
||||||
// isRealProc returns true on architectures that don't have a Type argument
|
// isRealProc returns true on architectures that don't have a Type argument
|
||||||
// in their Statfs_t struct
|
// in their Statfs_t struct.
|
||||||
func isRealProc(mountPoint string) (bool, error) {
|
func isRealProc(_ string) (bool, error) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -162,7 +162,7 @@ type Fscacheinfo struct {
|
||||||
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
|
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
|
||||||
// Number of release reqs ignored due to in-progress store
|
// Number of release reqs ignored due to in-progress store
|
||||||
ReleaseRequestsIgnoredDueToInProgressStore uint64
|
ReleaseRequestsIgnoredDueToInProgressStore uint64
|
||||||
// Number of page stores cancelled due to release req
|
// Number of page stores canceled due to release req
|
||||||
PageStoresCancelledByReleaseRequests uint64
|
PageStoresCancelledByReleaseRequests uint64
|
||||||
VmscanWaiting uint64
|
VmscanWaiting uint64
|
||||||
// Number of times async ops added to pending queues
|
// Number of times async ops added to pending queues
|
||||||
|
|
@ -171,11 +171,11 @@ type Fscacheinfo struct {
|
||||||
OpsRunning uint64
|
OpsRunning uint64
|
||||||
// Number of times async ops queued for processing
|
// Number of times async ops queued for processing
|
||||||
OpsEnqueued uint64
|
OpsEnqueued uint64
|
||||||
// Number of async ops cancelled
|
// Number of async ops canceled
|
||||||
OpsCancelled uint64
|
OpsCancelled uint64
|
||||||
// Number of async ops rejected due to object lookup/create failure
|
// Number of async ops rejected due to object lookup/create failure
|
||||||
OpsRejected uint64
|
OpsRejected uint64
|
||||||
// Number of async ops initialised
|
// Number of async ops initialized
|
||||||
OpsInitialised uint64
|
OpsInitialised uint64
|
||||||
// Number of async ops queued for deferred release
|
// Number of async ops queued for deferred release
|
||||||
OpsDeferred uint64
|
OpsDeferred uint64
|
||||||
|
|
|
||||||
|
|
@ -28,6 +28,9 @@ const (
|
||||||
|
|
||||||
// DefaultConfigfsMountPoint is the common mount point of the configfs.
|
// DefaultConfigfsMountPoint is the common mount point of the configfs.
|
||||||
DefaultConfigfsMountPoint = "/sys/kernel/config"
|
DefaultConfigfsMountPoint = "/sys/kernel/config"
|
||||||
|
|
||||||
|
// DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
|
||||||
|
DefaultSelinuxMountPoint = "/sys/fs/selinux"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
|
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
|
||||||
|
|
|
||||||
|
|
@ -14,6 +14,7 @@
|
||||||
package util
|
package util
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
|
||||||
}
|
}
|
||||||
return &truth
|
return &truth
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
|
||||||
|
func ReadHexFromFile(path string) (uint64, error) {
|
||||||
|
data, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
hexString := strings.TrimSpace(string(data))
|
||||||
|
if !strings.HasPrefix(hexString, "0x") {
|
||||||
|
return 0, errors.New("invalid format: hex string does not start with '0x'")
|
||||||
|
}
|
||||||
|
return strconv.ParseUint(hexString[2:], 16, 64)
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -20,6 +20,8 @@ package util
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"os"
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) {
|
||||||
|
|
||||||
return string(bytes.TrimSpace(b[:n])), nil
|
return string(bytes.TrimSpace(b[:n])), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
|
||||||
|
func SysReadUintFromFile(path string) (uint64, error) {
|
||||||
|
data, err := SysReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it.
|
||||||
|
func SysReadIntFromFile(path string) (int64, error) {
|
||||||
|
data, err := SysReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@@ -45,11 +45,11 @@ const (
 	fieldTransport11TCPLen = 13
 	fieldTransport11UDPLen = 10

-	// kernel version >= 4.14 MaxLen
+	// Kernel version >= 4.14 MaxLen
 	// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
 	fieldTransport11RDMAMaxLen = 28

-	// kernel version <= 4.2 MinLen
+	// Kernel version <= 4.2 MinLen
 	// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
 	fieldTransport11RDMAMinLen = 20
 )

@@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	switch statVersion {
 	case statVersion10:
 		var expectedLength int
-		if protocol == "tcp" {
+		switch protocol {
+		case "tcp":
 			expectedLength = fieldTransport10TCPLen
-		} else if protocol == "udp" {
+		case "udp":
 			expectedLength = fieldTransport10UDPLen
-		} else {
+		default:
 			return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
 		}
 		if len(ss) != expectedLength {

@@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 		}
 	case statVersion11:
 		var expectedLength int
-		if protocol == "tcp" {
+		switch protocol {
+		case "tcp":
 			expectedLength = fieldTransport11TCPLen
-		} else if protocol == "udp" {
+		case "udp":
 			expectedLength = fieldTransport11UDPLen
-		} else if protocol == "rdma" {
+		case "rdma":
 			expectedLength = fieldTransport11RDMAMinLen
-		} else {
+		default:
 			return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
 		}
 		if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||

@@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	// For the udp RPC transport there is no connection count, connect idle time,
 	// or idle time (fields #3, #4, and #5); all other fields are the same. So
 	// we set them to 0 here.
-	if protocol == "udp" {
+	switch protocol {
+	case "udp":
 		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
-	} else if protocol == "tcp" {
+	case "tcp":
 		ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
-	} else if protocol == "rdma" {
+	case "rdma":
 		ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
 	}
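The switches above feed the NFS transport counters that procfs exposes through its mountstats API. A rough, hedged usage sketch follows; the Device field, the Stats field, and the *procfs.MountStatsNFS type assertion reflect the public API as I understand it and should be treated as assumptions rather than a definitive example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats() // parses /proc/self/mountstats
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// Only NFS mounts carry the transport statistics parsed above.
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Printf("%s: age=%s\n", m.Device, nfs.Age)
		}
	}
}
```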
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+	netDevSNMP6 := make(NetDevSNMP6)
+
+	// The net/dev_snmp6 folders contain one file per interface
+	ifaceFiles, err := os.ReadDir(dir)
+	if err != nil {
+		// On systems with IPv6 disabled, this directory won't exist.
+		// Do nothing.
+		if errors.Is(err, os.ErrNotExist) {
+			return netDevSNMP6, err
+		}
+		return netDevSNMP6, err
+	}
+
+	for _, iFaceFile := range ifaceFiles {
+		f, err := os.Open(dir + "/" + iFaceFile.Name())
+		if err != nil {
+			return netDevSNMP6, err
+		}
+		defer f.Close()
+
+		netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+		if err != nil {
+			return netDevSNMP6, err
+		}
+	}
+
+	return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+	m := make(map[string]uint64)
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		stat := strings.Fields(scanner.Text())
+		if len(stat) < 2 {
+			continue
+		}
+		key, val := stat[0], stat[1]
+
+		// Expect stat name to contain "6" or be "ifIndex"
+		if strings.Contains(key, "6") || key == "ifIndex" {
+			v, err := strconv.ParseUint(val, 10, 64)
+			if err != nil {
+				return m, err
+			}
+
+			m[key] = v
+		}
+	}
+	return m, scanner.Err()
+}
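A minimal sketch of how the new NetDevSNMP6 accessor might be used; "Ip6InOctets" is just one example of the per-interface counter names found in /proc/net/dev_snmp6 files.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetDevSNMP6() // one inner map per interface
	if err != nil {
		log.Fatal(err)
	}
	for iface, counters := range stats {
		fmt.Printf("%s: Ip6InOctets=%d\n", iface, counters["Ip6InOctets"])
	}
}
```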
@@ -25,7 +25,7 @@ import (
 )

 const (
-	// readLimit is used by io.LimitReader while reading the content of the
+	// Maximum size limit used by io.LimitReader while reading the content of the
 	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
 	// as each line represents a single used socket.
 	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.

@@ -50,12 +50,12 @@ type (
 		// UsedSockets shows the total number of parsed lines representing the
 		// number of used sockets.
 		UsedSockets uint64
-		// Drops shows the total number of dropped packets of all UPD sockets.
+		// Drops shows the total number of dropped packets of all UDP sockets.
 		Drops *uint64
 	}

-	// netIPSocketLine represents the fields parsed from a single line
-	// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+	// A single line parser for fields from /proc/net/{t,u}dp{,6}.
+	// Fields which are not used by IPSocket are skipped.
 	// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
 	// For the proc file format details, see https://linux.die.net/man/5/proc.
 	netIPSocketLine struct {

@@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
 	if err != nil {
 		return nil, err
 	}
-	if fields[4] == enabled {
+	switch fields[4] {
+	case enabled:
 		line.Pressure = 1
-	} else if fields[4] == disabled {
+	case disabled:
 		line.Pressure = 0
-	} else {
+	default:
 		line.Pressure = -1
 	}
 	line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
 	if err != nil {
 		return nil, err
 	}
-	if fields[6] == enabled {
+	switch fields[6] {
+	case enabled:
 		line.Slab = true
-	} else if fields[6] == disabled {
+	case disabled:
 		line.Slab = false
-	} else {
+	default:
 		return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
 	}
 	line.ModuleName = fields[7]

@@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
 	}

 	for i := 0; i < len(capabilities); i++ {
-		if capabilities[i] == "y" {
+		switch capabilities[i] {
+		case "y":
 			*capabilityFields[i] = true
-		} else if capabilities[i] == "n" {
+		case "n":
 			*capabilityFields[i] = false
-		} else {
+		default:
 			return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
 		}
 	}

@@ -25,24 +25,28 @@ type (

 // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
 // read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
 func (fs FS) NetTCP() (NetTCP, error) {
 	return newNetTCP(fs.proc.Path("net/tcp"))
 }

 // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
 // read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
 func (fs FS) NetTCP6() (NetTCP, error) {
 	return newNetTCP(fs.proc.Path("net/tcp6"))
 }

 // NetTCPSummary returns already computed statistics like the total queue lengths
 // for TCP datagrams read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
 func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
 	return newNetTCPSummary(fs.proc.Path("net/tcp"))
 }

 // NetTCP6Summary returns already computed statistics like the total queue lengths
 // for TCP datagrams read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
 func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
 	return newNetTCPSummary(fs.proc.Path("net/tcp6"))
 }
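The new Deprecated markers point callers at netlink-based socket diagnostics, but the summary helpers keep working. A hedged sketch of the summary accessor (UsedSockets comes from the NetIPSocketSummary type shown above):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Still functional, but now marked Deprecated in favour of netlink.
	summary, err := fs.NetTCPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("used TCP sockets:", summary.UsedSockets)
}
```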
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
 	return &nu, nil
 }

-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
 	fields := strings.Fields(line)

 	l := len(fields)
-	if l < min {
-		return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
+	if l < minFields {
+		return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
 	}

 	// Field offsets are as follows:

@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
 	}

 	// Path field is optional.
-	if l > min {
+	if l > minFields {
 		// Path occurs at either index 6 or 7 depending on whether inode is
 		// already present.
 		pathIdx := 7

@@ -37,9 +37,9 @@ type Proc struct {
 type Procs []Proc

 var (
-	ErrFileParse  = errors.New("Error Parsing File")
-	ErrFileRead   = errors.New("Error Reading File")
-	ErrMountPoint = errors.New("Error Accessing Mount point")
+	ErrFileParse  = errors.New("error parsing file")
+	ErrFileRead   = errors.New("error reading file")
+	ErrMountPoint = errors.New("error accessing mount point")
 )

 func (p Procs) Len() int { return len(p) }
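Callers that match on these sentinels are unaffected by the reworded messages as long as they use errors.Is rather than string comparison, since the parsers wrap them with %w. A small sketch (Meminfo is just an arbitrary accessor used to trigger a parse):

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fs.Meminfo(); err != nil {
		// Sentinel comparison survives the message change; string matching would not.
		if errors.Is(err, procfs.ErrFileParse) {
			fmt.Println("malformed procfs content:", err)
			return
		}
		log.Fatal(err)
	}
	fmt.Println("meminfo parsed cleanly")
}
```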
@@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) {
 	if err != nil {
 		return Proc{}, err
 	}
-	pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+	pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
 	if err != nil {
 		return Proc{}, err
 	}

@@ -24,7 +24,7 @@ import (
 )

 // Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource
 // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
 // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
 // this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of

@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {

 	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
 		"read_bytes: %d\nwrite_bytes: %d\n" +
-		"cancelled_write_bytes: %d\n"
+		"cancelled_write_bytes: %d\n" //nolint:misspell

 	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
 		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)

@@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
   case "TcpExt":
     switch key {
     case "SyncookiesSent":
-      procNetstat.TcpExt.SyncookiesSent = &value
+      procNetstat.SyncookiesSent = &value
     case "SyncookiesRecv":
-      procNetstat.TcpExt.SyncookiesRecv = &value
+      procNetstat.SyncookiesRecv = &value
     case "SyncookiesFailed":
-      procNetstat.TcpExt.SyncookiesFailed = &value
+      procNetstat.SyncookiesFailed = &value
     case "EmbryonicRsts":
-      procNetstat.TcpExt.EmbryonicRsts = &value
+      procNetstat.EmbryonicRsts = &value
     case "PruneCalled":
-      procNetstat.TcpExt.PruneCalled = &value
+      procNetstat.PruneCalled = &value
     case "RcvPruned":
-      procNetstat.TcpExt.RcvPruned = &value
+      procNetstat.RcvPruned = &value
     case "OfoPruned":
-      procNetstat.TcpExt.OfoPruned = &value
+      procNetstat.OfoPruned = &value
     case "OutOfWindowIcmps":
-      procNetstat.TcpExt.OutOfWindowIcmps = &value
+      procNetstat.OutOfWindowIcmps = &value
     case "LockDroppedIcmps":
-      procNetstat.TcpExt.LockDroppedIcmps = &value
+      procNetstat.LockDroppedIcmps = &value
     case "ArpFilter":
-      procNetstat.TcpExt.ArpFilter = &value
+      procNetstat.ArpFilter = &value
     case "TW":
-      procNetstat.TcpExt.TW = &value
+      procNetstat.TW = &value
     case "TWRecycled":
-      procNetstat.TcpExt.TWRecycled = &value
+      procNetstat.TWRecycled = &value
     case "TWKilled":
-      procNetstat.TcpExt.TWKilled = &value
+      procNetstat.TWKilled = &value
     case "PAWSActive":
-      procNetstat.TcpExt.PAWSActive = &value
+      procNetstat.PAWSActive = &value
     case "PAWSEstab":
-      procNetstat.TcpExt.PAWSEstab = &value
+      procNetstat.PAWSEstab = &value
     case "DelayedACKs":
-      procNetstat.TcpExt.DelayedACKs = &value
+      procNetstat.DelayedACKs = &value
    case "DelayedACKLocked":
-      procNetstat.TcpExt.DelayedACKLocked = &value
+      procNetstat.DelayedACKLocked = &value
     case "DelayedACKLost":
-      procNetstat.TcpExt.DelayedACKLost = &value
+      procNetstat.DelayedACKLost = &value
     case "ListenOverflows":
-      procNetstat.TcpExt.ListenOverflows = &value
+      procNetstat.ListenOverflows = &value
     case "ListenDrops":
-      procNetstat.TcpExt.ListenDrops = &value
+      procNetstat.ListenDrops = &value
     case "TCPHPHits":
-      procNetstat.TcpExt.TCPHPHits = &value
+      procNetstat.TCPHPHits = &value
     case "TCPPureAcks":
-      procNetstat.TcpExt.TCPPureAcks = &value
+      procNetstat.TCPPureAcks = &value
     case "TCPHPAcks":
-      procNetstat.TcpExt.TCPHPAcks = &value
+      procNetstat.TCPHPAcks = &value
     case "TCPRenoRecovery":
-      procNetstat.TcpExt.TCPRenoRecovery = &value
+      procNetstat.TCPRenoRecovery = &value
     case "TCPSackRecovery":
-      procNetstat.TcpExt.TCPSackRecovery = &value
+      procNetstat.TCPSackRecovery = &value
     case "TCPSACKReneging":
-      procNetstat.TcpExt.TCPSACKReneging = &value
+      procNetstat.TCPSACKReneging = &value
     case "TCPSACKReorder":
-      procNetstat.TcpExt.TCPSACKReorder = &value
+      procNetstat.TCPSACKReorder = &value
     case "TCPRenoReorder":
-      procNetstat.TcpExt.TCPRenoReorder = &value
+      procNetstat.TCPRenoReorder = &value
     case "TCPTSReorder":
-      procNetstat.TcpExt.TCPTSReorder = &value
+      procNetstat.TCPTSReorder = &value
     case "TCPFullUndo":
-      procNetstat.TcpExt.TCPFullUndo = &value
+      procNetstat.TCPFullUndo = &value
     case "TCPPartialUndo":
-      procNetstat.TcpExt.TCPPartialUndo = &value
+      procNetstat.TCPPartialUndo = &value
     case "TCPDSACKUndo":
-      procNetstat.TcpExt.TCPDSACKUndo = &value
+      procNetstat.TCPDSACKUndo = &value
     case "TCPLossUndo":
-      procNetstat.TcpExt.TCPLossUndo = &value
+      procNetstat.TCPLossUndo = &value
     case "TCPLostRetransmit":
-      procNetstat.TcpExt.TCPLostRetransmit = &value
+      procNetstat.TCPLostRetransmit = &value
     case "TCPRenoFailures":
-      procNetstat.TcpExt.TCPRenoFailures = &value
+      procNetstat.TCPRenoFailures = &value
     case "TCPSackFailures":
-      procNetstat.TcpExt.TCPSackFailures = &value
+      procNetstat.TCPSackFailures = &value
     case "TCPLossFailures":
-      procNetstat.TcpExt.TCPLossFailures = &value
+      procNetstat.TCPLossFailures = &value
     case "TCPFastRetrans":
-      procNetstat.TcpExt.TCPFastRetrans = &value
+      procNetstat.TCPFastRetrans = &value
     case "TCPSlowStartRetrans":
-      procNetstat.TcpExt.TCPSlowStartRetrans = &value
+      procNetstat.TCPSlowStartRetrans = &value
     case "TCPTimeouts":
-      procNetstat.TcpExt.TCPTimeouts = &value
+      procNetstat.TCPTimeouts = &value
     case "TCPLossProbes":
-      procNetstat.TcpExt.TCPLossProbes = &value
+      procNetstat.TCPLossProbes = &value
     case "TCPLossProbeRecovery":
-      procNetstat.TcpExt.TCPLossProbeRecovery = &value
+      procNetstat.TCPLossProbeRecovery = &value
     case "TCPRenoRecoveryFail":
-      procNetstat.TcpExt.TCPRenoRecoveryFail = &value
+      procNetstat.TCPRenoRecoveryFail = &value
     case "TCPSackRecoveryFail":
-      procNetstat.TcpExt.TCPSackRecoveryFail = &value
+      procNetstat.TCPSackRecoveryFail = &value
     case "TCPRcvCollapsed":
-      procNetstat.TcpExt.TCPRcvCollapsed = &value
+      procNetstat.TCPRcvCollapsed = &value
     case "TCPDSACKOldSent":
-      procNetstat.TcpExt.TCPDSACKOldSent = &value
+      procNetstat.TCPDSACKOldSent = &value
     case "TCPDSACKOfoSent":
-      procNetstat.TcpExt.TCPDSACKOfoSent = &value
+      procNetstat.TCPDSACKOfoSent = &value
     case "TCPDSACKRecv":
-      procNetstat.TcpExt.TCPDSACKRecv = &value
+      procNetstat.TCPDSACKRecv = &value
     case "TCPDSACKOfoRecv":
-      procNetstat.TcpExt.TCPDSACKOfoRecv = &value
+      procNetstat.TCPDSACKOfoRecv = &value
     case "TCPAbortOnData":
-      procNetstat.TcpExt.TCPAbortOnData = &value
+      procNetstat.TCPAbortOnData = &value
     case "TCPAbortOnClose":
-      procNetstat.TcpExt.TCPAbortOnClose = &value
+      procNetstat.TCPAbortOnClose = &value
     case "TCPDeferAcceptDrop":
-      procNetstat.TcpExt.TCPDeferAcceptDrop = &value
+      procNetstat.TCPDeferAcceptDrop = &value
     case "IPReversePathFilter":
-      procNetstat.TcpExt.IPReversePathFilter = &value
+      procNetstat.IPReversePathFilter = &value
     case "TCPTimeWaitOverflow":
-      procNetstat.TcpExt.TCPTimeWaitOverflow = &value
+      procNetstat.TCPTimeWaitOverflow = &value
     case "TCPReqQFullDoCookies":
-      procNetstat.TcpExt.TCPReqQFullDoCookies = &value
+      procNetstat.TCPReqQFullDoCookies = &value
     case "TCPReqQFullDrop":
-      procNetstat.TcpExt.TCPReqQFullDrop = &value
+      procNetstat.TCPReqQFullDrop = &value
     case "TCPRetransFail":
-      procNetstat.TcpExt.TCPRetransFail = &value
+      procNetstat.TCPRetransFail = &value
     case "TCPRcvCoalesce":
-      procNetstat.TcpExt.TCPRcvCoalesce = &value
+      procNetstat.TCPRcvCoalesce = &value
     case "TCPRcvQDrop":
-      procNetstat.TcpExt.TCPRcvQDrop = &value
+      procNetstat.TCPRcvQDrop = &value
     case "TCPOFOQueue":
-      procNetstat.TcpExt.TCPOFOQueue = &value
+      procNetstat.TCPOFOQueue = &value
     case "TCPOFODrop":
-      procNetstat.TcpExt.TCPOFODrop = &value
+      procNetstat.TCPOFODrop = &value
     case "TCPOFOMerge":
-      procNetstat.TcpExt.TCPOFOMerge = &value
+      procNetstat.TCPOFOMerge = &value
     case "TCPChallengeACK":
-      procNetstat.TcpExt.TCPChallengeACK = &value
+      procNetstat.TCPChallengeACK = &value
     case "TCPSYNChallenge":
-      procNetstat.TcpExt.TCPSYNChallenge = &value
+      procNetstat.TCPSYNChallenge = &value
     case "TCPFastOpenActive":
-      procNetstat.TcpExt.TCPFastOpenActive = &value
+      procNetstat.TCPFastOpenActive = &value
     case "TCPFastOpenActiveFail":
-      procNetstat.TcpExt.TCPFastOpenActiveFail = &value
+      procNetstat.TCPFastOpenActiveFail = &value
     case "TCPFastOpenPassive":
-      procNetstat.TcpExt.TCPFastOpenPassive = &value
+      procNetstat.TCPFastOpenPassive = &value
     case "TCPFastOpenPassiveFail":
-      procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
+      procNetstat.TCPFastOpenPassiveFail = &value
     case "TCPFastOpenListenOverflow":
-      procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
+      procNetstat.TCPFastOpenListenOverflow = &value
     case "TCPFastOpenCookieReqd":
-      procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
+      procNetstat.TCPFastOpenCookieReqd = &value
     case "TCPFastOpenBlackhole":
-      procNetstat.TcpExt.TCPFastOpenBlackhole = &value
+      procNetstat.TCPFastOpenBlackhole = &value
     case "TCPSpuriousRtxHostQueues":
-      procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
+      procNetstat.TCPSpuriousRtxHostQueues = &value
     case "BusyPollRxPackets":
-      procNetstat.TcpExt.BusyPollRxPackets = &value
+      procNetstat.BusyPollRxPackets = &value
     case "TCPAutoCorking":
-      procNetstat.TcpExt.TCPAutoCorking = &value
+      procNetstat.TCPAutoCorking = &value
     case "TCPFromZeroWindowAdv":
-      procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
+      procNetstat.TCPFromZeroWindowAdv = &value
     case "TCPToZeroWindowAdv":
-      procNetstat.TcpExt.TCPToZeroWindowAdv = &value
+      procNetstat.TCPToZeroWindowAdv = &value
     case "TCPWantZeroWindowAdv":
-      procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
+      procNetstat.TCPWantZeroWindowAdv = &value
     case "TCPSynRetrans":
-      procNetstat.TcpExt.TCPSynRetrans = &value
+      procNetstat.TCPSynRetrans = &value
     case "TCPOrigDataSent":
-      procNetstat.TcpExt.TCPOrigDataSent = &value
+      procNetstat.TCPOrigDataSent = &value
     case "TCPHystartTrainDetect":
-      procNetstat.TcpExt.TCPHystartTrainDetect = &value
+      procNetstat.TCPHystartTrainDetect = &value
     case "TCPHystartTrainCwnd":
-      procNetstat.TcpExt.TCPHystartTrainCwnd = &value
+      procNetstat.TCPHystartTrainCwnd = &value
     case "TCPHystartDelayDetect":
-      procNetstat.TcpExt.TCPHystartDelayDetect = &value
+      procNetstat.TCPHystartDelayDetect = &value
     case "TCPHystartDelayCwnd":
-      procNetstat.TcpExt.TCPHystartDelayCwnd = &value
+      procNetstat.TCPHystartDelayCwnd = &value
     case "TCPACKSkippedSynRecv":
-      procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
+      procNetstat.TCPACKSkippedSynRecv = &value
     case "TCPACKSkippedPAWS":
-      procNetstat.TcpExt.TCPACKSkippedPAWS = &value
+      procNetstat.TCPACKSkippedPAWS = &value
     case "TCPACKSkippedSeq":
-      procNetstat.TcpExt.TCPACKSkippedSeq = &value
+      procNetstat.TCPACKSkippedSeq = &value
     case "TCPACKSkippedFinWait2":
-      procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
+      procNetstat.TCPACKSkippedFinWait2 = &value
     case "TCPACKSkippedTimeWait":
-      procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
+      procNetstat.TCPACKSkippedTimeWait = &value
     case "TCPACKSkippedChallenge":
-      procNetstat.TcpExt.TCPACKSkippedChallenge = &value
+      procNetstat.TCPACKSkippedChallenge = &value
     case "TCPWinProbe":
-      procNetstat.TcpExt.TCPWinProbe = &value
+      procNetstat.TCPWinProbe = &value
     case "TCPKeepAlive":
-      procNetstat.TcpExt.TCPKeepAlive = &value
+      procNetstat.TCPKeepAlive = &value
     case "TCPMTUPFail":
-      procNetstat.TcpExt.TCPMTUPFail = &value
+      procNetstat.TCPMTUPFail = &value
     case "TCPMTUPSuccess":
-      procNetstat.TcpExt.TCPMTUPSuccess = &value
+      procNetstat.TCPMTUPSuccess = &value
     case "TCPWqueueTooBig":
-      procNetstat.TcpExt.TCPWqueueTooBig = &value
+      procNetstat.TCPWqueueTooBig = &value
     }
   case "IpExt":
     switch key {
     case "InNoRoutes":
-      procNetstat.IpExt.InNoRoutes = &value
+      procNetstat.InNoRoutes = &value
     case "InTruncatedPkts":
-      procNetstat.IpExt.InTruncatedPkts = &value
+      procNetstat.InTruncatedPkts = &value
     case "InMcastPkts":
-      procNetstat.IpExt.InMcastPkts = &value
+      procNetstat.InMcastPkts = &value
     case "OutMcastPkts":
-      procNetstat.IpExt.OutMcastPkts = &value
+      procNetstat.OutMcastPkts = &value
     case "InBcastPkts":
-      procNetstat.IpExt.InBcastPkts = &value
+      procNetstat.InBcastPkts = &value
     case "OutBcastPkts":
-      procNetstat.IpExt.OutBcastPkts = &value
+      procNetstat.OutBcastPkts = &value
     case "InOctets":
-      procNetstat.IpExt.InOctets = &value
+      procNetstat.InOctets = &value
     case "OutOctets":
-      procNetstat.IpExt.OutOctets = &value
+      procNetstat.OutOctets = &value
     case "InMcastOctets":
-      procNetstat.IpExt.InMcastOctets = &value
+      procNetstat.InMcastOctets = &value
     case "OutMcastOctets":
-      procNetstat.IpExt.OutMcastOctets = &value
+      procNetstat.OutMcastOctets = &value
     case "InBcastOctets":
-      procNetstat.IpExt.InBcastOctets = &value
+      procNetstat.InBcastOctets = &value
     case "OutBcastOctets":
-      procNetstat.IpExt.OutBcastOctets = &value
+      procNetstat.OutBcastOctets = &value
     case "InCsumErrors":
-      procNetstat.IpExt.InCsumErrors = &value
+      procNetstat.InCsumErrors = &value
     case "InNoECTPkts":
-      procNetstat.IpExt.InNoECTPkts = &value
+      procNetstat.InNoECTPkts = &value
     case "InECT1Pkts":
-      procNetstat.IpExt.InECT1Pkts = &value
+      procNetstat.InECT1Pkts = &value
     case "InECT0Pkts":
-      procNetstat.IpExt.InECT0Pkts = &value
+      procNetstat.InECT0Pkts = &value
     case "InCEPkts":
-      procNetstat.IpExt.InCEPkts = &value
+      procNetstat.InCEPkts = &value
     case "ReasmOverlaps":
-      procNetstat.IpExt.ReasmOverlaps = &value
+      procNetstat.ReasmOverlaps = &value
     }
   }
 }
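The rewrite above (and the analogous ProcSnmp/ProcSnmp6 hunks that follow) leans on Go struct embedding: ProcNetstat embeds TcpExt and IpExt, so the qualified and the promoted spellings name the same field. A hedged sketch of reading the counters through the public API; note they are pointers and stay nil when the kernel does not report them.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stats, err := p.Netstat() // parses /proc/self/net/netstat
	if err != nil {
		log.Fatal(err)
	}
	if stats.TCPSynRetrans != nil {
		// stats.TcpExt.TCPSynRetrans refers to the very same field; the parser
		// above simply switched to the promoted spelling.
		fmt.Println("TCPSynRetrans:", *stats.TCPSynRetrans)
	}
}
```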
@@ -19,7 +19,6 @@ package procfs
 import (
 	"bufio"
 	"errors"
-	"fmt"
 	"os"
 	"regexp"
 	"strconv"

@@ -29,7 +28,7 @@ import (
 )

 var (
-	// match the header line before each mapped zone in `/proc/pid/smaps`.
+	// Match the header line before each mapped zone in `/proc/pid/smaps`.
 	procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
 )

@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
 func (s *ProcSMapsRollup) parseLine(line string) error {
 	kv := strings.SplitN(line, ":", 2)
 	if len(kv) != 2 {
-		fmt.Println(line)
 		return errors.New("invalid net/dev line, missing colon")
 	}

@@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
   case "Ip":
     switch key {
     case "Forwarding":
-      procSnmp.Ip.Forwarding = &value
+      procSnmp.Forwarding = &value
     case "DefaultTTL":
-      procSnmp.Ip.DefaultTTL = &value
+      procSnmp.DefaultTTL = &value
     case "InReceives":
-      procSnmp.Ip.InReceives = &value
+      procSnmp.InReceives = &value
     case "InHdrErrors":
-      procSnmp.Ip.InHdrErrors = &value
+      procSnmp.InHdrErrors = &value
     case "InAddrErrors":
-      procSnmp.Ip.InAddrErrors = &value
+      procSnmp.InAddrErrors = &value
     case "ForwDatagrams":
-      procSnmp.Ip.ForwDatagrams = &value
+      procSnmp.ForwDatagrams = &value
     case "InUnknownProtos":
-      procSnmp.Ip.InUnknownProtos = &value
+      procSnmp.InUnknownProtos = &value
     case "InDiscards":
-      procSnmp.Ip.InDiscards = &value
+      procSnmp.InDiscards = &value
     case "InDelivers":
-      procSnmp.Ip.InDelivers = &value
+      procSnmp.InDelivers = &value
     case "OutRequests":
-      procSnmp.Ip.OutRequests = &value
+      procSnmp.OutRequests = &value
     case "OutDiscards":
-      procSnmp.Ip.OutDiscards = &value
+      procSnmp.OutDiscards = &value
     case "OutNoRoutes":
-      procSnmp.Ip.OutNoRoutes = &value
+      procSnmp.OutNoRoutes = &value
     case "ReasmTimeout":
-      procSnmp.Ip.ReasmTimeout = &value
+      procSnmp.ReasmTimeout = &value
     case "ReasmReqds":
-      procSnmp.Ip.ReasmReqds = &value
+      procSnmp.ReasmReqds = &value
     case "ReasmOKs":
-      procSnmp.Ip.ReasmOKs = &value
+      procSnmp.ReasmOKs = &value
     case "ReasmFails":
-      procSnmp.Ip.ReasmFails = &value
+      procSnmp.ReasmFails = &value
     case "FragOKs":
-      procSnmp.Ip.FragOKs = &value
+      procSnmp.FragOKs = &value
     case "FragFails":
-      procSnmp.Ip.FragFails = &value
+      procSnmp.FragFails = &value
     case "FragCreates":
-      procSnmp.Ip.FragCreates = &value
+      procSnmp.FragCreates = &value
     }
   case "Icmp":
     switch key {
     case "InMsgs":
-      procSnmp.Icmp.InMsgs = &value
+      procSnmp.InMsgs = &value
     case "InErrors":
       procSnmp.Icmp.InErrors = &value
     case "InCsumErrors":
       procSnmp.Icmp.InCsumErrors = &value
     case "InDestUnreachs":
-      procSnmp.Icmp.InDestUnreachs = &value
+      procSnmp.InDestUnreachs = &value
     case "InTimeExcds":
-      procSnmp.Icmp.InTimeExcds = &value
+      procSnmp.InTimeExcds = &value
     case "InParmProbs":
-      procSnmp.Icmp.InParmProbs = &value
+      procSnmp.InParmProbs = &value
     case "InSrcQuenchs":
-      procSnmp.Icmp.InSrcQuenchs = &value
+      procSnmp.InSrcQuenchs = &value
     case "InRedirects":
-      procSnmp.Icmp.InRedirects = &value
+      procSnmp.InRedirects = &value
     case "InEchos":
-      procSnmp.Icmp.InEchos = &value
+      procSnmp.InEchos = &value
     case "InEchoReps":
-      procSnmp.Icmp.InEchoReps = &value
+      procSnmp.InEchoReps = &value
     case "InTimestamps":
-      procSnmp.Icmp.InTimestamps = &value
+      procSnmp.InTimestamps = &value
     case "InTimestampReps":
-      procSnmp.Icmp.InTimestampReps = &value
+      procSnmp.InTimestampReps = &value
     case "InAddrMasks":
-      procSnmp.Icmp.InAddrMasks = &value
+      procSnmp.InAddrMasks = &value
     case "InAddrMaskReps":
-      procSnmp.Icmp.InAddrMaskReps = &value
+      procSnmp.InAddrMaskReps = &value
     case "OutMsgs":
-      procSnmp.Icmp.OutMsgs = &value
+      procSnmp.OutMsgs = &value
     case "OutErrors":
-      procSnmp.Icmp.OutErrors = &value
+      procSnmp.OutErrors = &value
     case "OutDestUnreachs":
-      procSnmp.Icmp.OutDestUnreachs = &value
+      procSnmp.OutDestUnreachs = &value
     case "OutTimeExcds":
-      procSnmp.Icmp.OutTimeExcds = &value
+      procSnmp.OutTimeExcds = &value
     case "OutParmProbs":
-      procSnmp.Icmp.OutParmProbs = &value
+      procSnmp.OutParmProbs = &value
     case "OutSrcQuenchs":
-      procSnmp.Icmp.OutSrcQuenchs = &value
+      procSnmp.OutSrcQuenchs = &value
     case "OutRedirects":
-      procSnmp.Icmp.OutRedirects = &value
+      procSnmp.OutRedirects = &value
     case "OutEchos":
-      procSnmp.Icmp.OutEchos = &value
+      procSnmp.OutEchos = &value
     case "OutEchoReps":
-      procSnmp.Icmp.OutEchoReps = &value
+      procSnmp.OutEchoReps = &value
     case "OutTimestamps":
-      procSnmp.Icmp.OutTimestamps = &value
+      procSnmp.OutTimestamps = &value
     case "OutTimestampReps":
-      procSnmp.Icmp.OutTimestampReps = &value
+      procSnmp.OutTimestampReps = &value
     case "OutAddrMasks":
-      procSnmp.Icmp.OutAddrMasks = &value
+      procSnmp.OutAddrMasks = &value
     case "OutAddrMaskReps":
-      procSnmp.Icmp.OutAddrMaskReps = &value
+      procSnmp.OutAddrMaskReps = &value
     }
   case "IcmpMsg":
     switch key {
     case "InType3":
-      procSnmp.IcmpMsg.InType3 = &value
+      procSnmp.InType3 = &value
     case "OutType3":
-      procSnmp.IcmpMsg.OutType3 = &value
+      procSnmp.OutType3 = &value
     }
   case "Tcp":
     switch key {
     case "RtoAlgorithm":
-      procSnmp.Tcp.RtoAlgorithm = &value
+      procSnmp.RtoAlgorithm = &value
     case "RtoMin":
-      procSnmp.Tcp.RtoMin = &value
+      procSnmp.RtoMin = &value
     case "RtoMax":
-      procSnmp.Tcp.RtoMax = &value
+      procSnmp.RtoMax = &value
     case "MaxConn":
-      procSnmp.Tcp.MaxConn = &value
+      procSnmp.MaxConn = &value
     case "ActiveOpens":
-      procSnmp.Tcp.ActiveOpens = &value
+      procSnmp.ActiveOpens = &value
     case "PassiveOpens":
-      procSnmp.Tcp.PassiveOpens = &value
+      procSnmp.PassiveOpens = &value
     case "AttemptFails":
-      procSnmp.Tcp.AttemptFails = &value
+      procSnmp.AttemptFails = &value
     case "EstabResets":
-      procSnmp.Tcp.EstabResets = &value
+      procSnmp.EstabResets = &value
    case "CurrEstab":
-      procSnmp.Tcp.CurrEstab = &value
+      procSnmp.CurrEstab = &value
     case "InSegs":
-      procSnmp.Tcp.InSegs = &value
+      procSnmp.InSegs = &value
     case "OutSegs":
-      procSnmp.Tcp.OutSegs = &value
+      procSnmp.OutSegs = &value
     case "RetransSegs":
-      procSnmp.Tcp.RetransSegs = &value
+      procSnmp.RetransSegs = &value
     case "InErrs":
-      procSnmp.Tcp.InErrs = &value
+      procSnmp.InErrs = &value
     case "OutRsts":
-      procSnmp.Tcp.OutRsts = &value
+      procSnmp.OutRsts = &value
     case "InCsumErrors":
       procSnmp.Tcp.InCsumErrors = &value
     }

@@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
   case "Ip6":
     switch key {
     case "InReceives":
-      procSnmp6.Ip6.InReceives = &value
+      procSnmp6.InReceives = &value
     case "InHdrErrors":
-      procSnmp6.Ip6.InHdrErrors = &value
+      procSnmp6.InHdrErrors = &value
     case "InTooBigErrors":
-      procSnmp6.Ip6.InTooBigErrors = &value
+      procSnmp6.InTooBigErrors = &value
     case "InNoRoutes":
-      procSnmp6.Ip6.InNoRoutes = &value
+      procSnmp6.InNoRoutes = &value
     case "InAddrErrors":
-      procSnmp6.Ip6.InAddrErrors = &value
+      procSnmp6.InAddrErrors = &value
     case "InUnknownProtos":
-      procSnmp6.Ip6.InUnknownProtos = &value
+      procSnmp6.InUnknownProtos = &value
     case "InTruncatedPkts":
-      procSnmp6.Ip6.InTruncatedPkts = &value
+      procSnmp6.InTruncatedPkts = &value
     case "InDiscards":
-      procSnmp6.Ip6.InDiscards = &value
+      procSnmp6.InDiscards = &value
     case "InDelivers":
-      procSnmp6.Ip6.InDelivers = &value
+      procSnmp6.InDelivers = &value
     case "OutForwDatagrams":
-      procSnmp6.Ip6.OutForwDatagrams = &value
+      procSnmp6.OutForwDatagrams = &value
     case "OutRequests":
-      procSnmp6.Ip6.OutRequests = &value
+      procSnmp6.OutRequests = &value
     case "OutDiscards":
-      procSnmp6.Ip6.OutDiscards = &value
+      procSnmp6.OutDiscards = &value
     case "OutNoRoutes":
-      procSnmp6.Ip6.OutNoRoutes = &value
+      procSnmp6.OutNoRoutes = &value
     case "ReasmTimeout":
-      procSnmp6.Ip6.ReasmTimeout = &value
+      procSnmp6.ReasmTimeout = &value
     case "ReasmReqds":
-      procSnmp6.Ip6.ReasmReqds = &value
+      procSnmp6.ReasmReqds = &value
     case "ReasmOKs":
-      procSnmp6.Ip6.ReasmOKs = &value
+      procSnmp6.ReasmOKs = &value
     case "ReasmFails":
-      procSnmp6.Ip6.ReasmFails = &value
+      procSnmp6.ReasmFails = &value
     case "FragOKs":
-      procSnmp6.Ip6.FragOKs = &value
+      procSnmp6.FragOKs = &value
     case "FragFails":
-      procSnmp6.Ip6.FragFails = &value
+      procSnmp6.FragFails = &value
     case "FragCreates":
-      procSnmp6.Ip6.FragCreates = &value
+      procSnmp6.FragCreates = &value
     case "InMcastPkts":
-      procSnmp6.Ip6.InMcastPkts = &value
+      procSnmp6.InMcastPkts = &value
     case "OutMcastPkts":
-      procSnmp6.Ip6.OutMcastPkts = &value
+      procSnmp6.OutMcastPkts = &value
     case "InOctets":
-      procSnmp6.Ip6.InOctets = &value
+      procSnmp6.InOctets = &value
     case "OutOctets":
-      procSnmp6.Ip6.OutOctets = &value
+      procSnmp6.OutOctets = &value
     case "InMcastOctets":
-      procSnmp6.Ip6.InMcastOctets = &value
+      procSnmp6.InMcastOctets = &value
     case "OutMcastOctets":
-      procSnmp6.Ip6.OutMcastOctets = &value
+      procSnmp6.OutMcastOctets = &value
     case "InBcastOctets":
-      procSnmp6.Ip6.InBcastOctets = &value
+      procSnmp6.InBcastOctets = &value
     case "OutBcastOctets":
-      procSnmp6.Ip6.OutBcastOctets = &value
+      procSnmp6.OutBcastOctets = &value
     case "InNoECTPkts":
-      procSnmp6.Ip6.InNoECTPkts = &value
+      procSnmp6.InNoECTPkts = &value
     case "InECT1Pkts":
-      procSnmp6.Ip6.InECT1Pkts = &value
+      procSnmp6.InECT1Pkts = &value
     case "InECT0Pkts":
-      procSnmp6.Ip6.InECT0Pkts = &value
+      procSnmp6.InECT0Pkts = &value
     case "InCEPkts":
-      procSnmp6.Ip6.InCEPkts = &value
+      procSnmp6.InCEPkts = &value

     }
   case "Icmp6":
     switch key {
     case "InMsgs":
-      procSnmp6.Icmp6.InMsgs = &value
+      procSnmp6.InMsgs = &value
     case "InErrors":
       procSnmp6.Icmp6.InErrors = &value
     case "OutMsgs":
-      procSnmp6.Icmp6.OutMsgs = &value
+      procSnmp6.OutMsgs = &value
     case "OutErrors":
-      procSnmp6.Icmp6.OutErrors = &value
+      procSnmp6.OutErrors = &value
     case "InCsumErrors":
       procSnmp6.Icmp6.InCsumErrors = &value
     case "InDestUnreachs":
-      procSnmp6.Icmp6.InDestUnreachs = &value
+      procSnmp6.InDestUnreachs = &value
     case "InPktTooBigs":
-      procSnmp6.Icmp6.InPktTooBigs = &value
+      procSnmp6.InPktTooBigs = &value
     case "InTimeExcds":
-      procSnmp6.Icmp6.InTimeExcds = &value
+      procSnmp6.InTimeExcds = &value
     case "InParmProblems":
-      procSnmp6.Icmp6.InParmProblems = &value
+      procSnmp6.InParmProblems = &value
     case "InEchos":
-      procSnmp6.Icmp6.InEchos = &value
+      procSnmp6.InEchos = &value
     case "InEchoReplies":
-      procSnmp6.Icmp6.InEchoReplies = &value
+      procSnmp6.InEchoReplies = &value
     case "InGroupMembQueries":
-      procSnmp6.Icmp6.InGroupMembQueries = &value
+      procSnmp6.InGroupMembQueries = &value
     case "InGroupMembResponses":
-      procSnmp6.Icmp6.InGroupMembResponses = &value
+      procSnmp6.InGroupMembResponses = &value
     case "InGroupMembReductions":
-      procSnmp6.Icmp6.InGroupMembReductions = &value
+      procSnmp6.InGroupMembReductions = &value
     case "InRouterSolicits":
-      procSnmp6.Icmp6.InRouterSolicits = &value
+      procSnmp6.InRouterSolicits = &value
     case "InRouterAdvertisements":
-      procSnmp6.Icmp6.InRouterAdvertisements = &value
+      procSnmp6.InRouterAdvertisements = &value
     case "InNeighborSolicits":
-      procSnmp6.Icmp6.InNeighborSolicits = &value
+      procSnmp6.InNeighborSolicits = &value
     case "InNeighborAdvertisements":
-      procSnmp6.Icmp6.InNeighborAdvertisements = &value
+      procSnmp6.InNeighborAdvertisements = &value
     case "InRedirects":
-      procSnmp6.Icmp6.InRedirects = &value
+      procSnmp6.InRedirects = &value
     case "InMLDv2Reports":
-      procSnmp6.Icmp6.InMLDv2Reports = &value
+      procSnmp6.InMLDv2Reports = &value
     case "OutDestUnreachs":
-      procSnmp6.Icmp6.OutDestUnreachs = &value
+      procSnmp6.OutDestUnreachs = &value
     case "OutPktTooBigs":
-      procSnmp6.Icmp6.OutPktTooBigs = &value
+      procSnmp6.OutPktTooBigs = &value
     case "OutTimeExcds":
-      procSnmp6.Icmp6.OutTimeExcds = &value
+      procSnmp6.OutTimeExcds = &value
     case "OutParmProblems":
-      procSnmp6.Icmp6.OutParmProblems = &value
+      procSnmp6.OutParmProblems = &value
     case "OutEchos":
-      procSnmp6.Icmp6.OutEchos = &value
+      procSnmp6.OutEchos = &value
     case "OutEchoReplies":
-      procSnmp6.Icmp6.OutEchoReplies = &value
+      procSnmp6.OutEchoReplies = &value
     case "OutGroupMembQueries":
-      procSnmp6.Icmp6.OutGroupMembQueries = &value
+      procSnmp6.OutGroupMembQueries = &value
     case "OutGroupMembResponses":
-      procSnmp6.Icmp6.OutGroupMembResponses = &value
+      procSnmp6.OutGroupMembResponses = &value
     case "OutGroupMembReductions":
-      procSnmp6.Icmp6.OutGroupMembReductions = &value
+      procSnmp6.OutGroupMembReductions = &value
     case "OutRouterSolicits":
-      procSnmp6.Icmp6.OutRouterSolicits = &value
+      procSnmp6.OutRouterSolicits = &value
     case "OutRouterAdvertisements":
-      procSnmp6.Icmp6.OutRouterAdvertisements = &value
+      procSnmp6.OutRouterAdvertisements = &value
     case "OutNeighborSolicits":
-      procSnmp6.Icmp6.OutNeighborSolicits = &value
+      procSnmp6.OutNeighborSolicits = &value
     case "OutNeighborAdvertisements":
-      procSnmp6.Icmp6.OutNeighborAdvertisements = &value
+      procSnmp6.OutNeighborAdvertisements = &value
     case "OutRedirects":
-      procSnmp6.Icmp6.OutRedirects = &value
+      procSnmp6.OutRedirects = &value
     case "OutMLDv2Reports":
-      procSnmp6.Icmp6.OutMLDv2Reports = &value
+      procSnmp6.OutMLDv2Reports = &value
     case "InType1":
-      procSnmp6.Icmp6.InType1 = &value
+      procSnmp6.InType1 = &value
     case "InType134":
-      procSnmp6.Icmp6.InType134 = &value
+      procSnmp6.InType134 = &value
     case "InType135":
-      procSnmp6.Icmp6.InType135 = &value
+      procSnmp6.InType135 = &value
     case "InType136":
-      procSnmp6.Icmp6.InType136 = &value
+      procSnmp6.InType136 = &value
     case "InType143":
-      procSnmp6.Icmp6.InType143 = &value
+      procSnmp6.InType143 = &value
     case "OutType133":
-      procSnmp6.Icmp6.OutType133 = &value
+      procSnmp6.OutType133 = &value
     case "OutType135":
-      procSnmp6.Icmp6.OutType135 = &value
+      procSnmp6.OutType135 = &value
     case "OutType136":
-      procSnmp6.Icmp6.OutType136 = &value
+      procSnmp6.OutType136 = &value
     case "OutType143":
-      procSnmp6.Icmp6.OutType143 = &value
+      procSnmp6.OutType143 = &value
     }
   case "Udp6":
     switch key {

@@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
     case "InCsumErrors":
       procSnmp6.Udp6.InCsumErrors = &value
     case "IgnoredMulti":
-      procSnmp6.Udp6.IgnoredMulti = &value
+      procSnmp6.IgnoredMulti = &value
     }
   case "UdpLite6":
     switch key {

@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
 		}
 	}
 	case "NSpid":
-		s.NSpids = calcNSPidsList(vString)
+		nspids, err := calcNSPidsList(vString)
+		if err != nil {
+			return err
+		}
+		s.NSpids = nspids
 	case "VmPeak":
 		s.VmPeak = vUintBytes
 	case "VmSize":

@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
 	return g
 }

-func calcNSPidsList(nspidsString string) []uint64 {
-	s := strings.Split(nspidsString, " ")
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+	s := strings.Split(nspidsString, "\t")
 	var nspids []uint64

 	for _, nspid := range s {
-		nspid, _ := strconv.ParseUint(nspid, 10, 64)
-		if nspid == 0 {
-			continue
+		nspid, err := strconv.ParseUint(nspid, 10, 64)
+		if err != nil {
+			return nil, err
 		}
 		nspids = append(nspids, nspid)
 	}

-	return nspids
+	return nspids, nil
 }
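With calcNSPidsList now splitting on tabs and propagating parse errors, the NSpid line from /proc/<pid>/status surfaces through the process status API. A rough sketch; the NewStatus method and the NSpids field reflect the public procfs API as I understand it.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus() // parses /proc/self/status, including the NSpid line
	if err != nil {
		log.Fatal(err)
	}
	// NSpids lists the PID in each nested PID namespace, outermost first.
	fmt.Println("NSpid:", status.NSpids)
}
```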
@@ -21,7 +21,7 @@ import (
 )

 func sysctlToPath(sysctl string) string {
-	return strings.Replace(sysctl, ".", "/", -1)
+	return strings.ReplaceAll(sysctl, ".", "/")
 }

 func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
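sysctlToPath maps a dotted sysctl name onto its /proc/sys path, which is what SysctlStrings builds on. A brief sketch; "kernel.hostname" is just an example key that resolves to /proc/sys/kernel/hostname.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Dotted names are translated to paths, e.g. "kernel.hostname" -> /proc/sys/kernel/hostname.
	values, err := fs.SysctlStrings("kernel.hostname")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(values)
}
```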
@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
		if len(parts) < 2 {
			continue
		}
-		switch {
-		case parts[0] == "HI:":
+		switch parts[0] {
+		case "HI:":
			perCPU := parts[1:]
			softirqs.Hi = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "TIMER:":
+		case "TIMER:":
			perCPU := parts[1:]
			softirqs.Timer = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "NET_TX:":
+		case "NET_TX:":
			perCPU := parts[1:]
			softirqs.NetTx = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "NET_RX:":
+		case "NET_RX:":
			perCPU := parts[1:]
			softirqs.NetRx = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "BLOCK:":
+		case "BLOCK:":
			perCPU := parts[1:]
			softirqs.Block = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "IRQ_POLL:":
+		case "IRQ_POLL:":
			perCPU := parts[1:]
			softirqs.IRQPoll = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "TASKLET:":
+		case "TASKLET:":
			perCPU := parts[1:]
			softirqs.Tasklet = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "SCHED:":
+		case "SCHED:":
			perCPU := parts[1:]
			softirqs.Sched = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "HRTIMER:":
+		case "HRTIMER:":
			perCPU := parts[1:]
			softirqs.HRTimer = make([]uint64, len(perCPU))
			for i, count := range perCPU {
@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
				}
			}
-		case parts[0] == "RCU:":
+		case "RCU:":
			perCPU := parts[1:]
			softirqs.RCU = make([]uint64, len(perCPU))
			for i, count := range perCPU {

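The refactor only swaps an untagged switch with repeated parts[0] comparisons for a tagged switch on parts[0]; behaviour is unchanged. A minimal standalone sketch of the same pattern (not the vendored parser), handling just the HI: row of a made-up /proc/softirqs line:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "HI: 3 7 2 9"
	parts := strings.Fields(line)
	if len(parts) < 2 {
		return
	}

	var hi []uint64
	// Tagged switch: the case values are compared against parts[0] directly.
	switch parts[0] {
	case "HI:":
		perCPU := parts[1:]
		hi = make([]uint64, len(perCPU))
		for i, count := range perCPU {
			v, err := strconv.ParseUint(count, 10, 64)
			if err != nil {
				fmt.Println("parse error:", err)
				return
			}
			hi[i] = v
		}
	}
	fmt.Println(hi) // [3 7 2 9]
}
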
@ -0,0 +1,27 @@
# Contributing to go.opentelemetry.io/auto/sdk

The `go.opentelemetry.io/auto/sdk` module is a purpose-built OpenTelemetry SDK.
It is designed to be:

0. An OpenTelemetry-compliant SDK
1. Instrumented by auto-instrumentation (serializable into OTLP JSON)
2. Lightweight
3. User-friendly

These design choices are listed in the order of their importance.

The primary design goal of this module is to be an OpenTelemetry SDK.
This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`.

Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument.
The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP.
This ensures that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent.

Outside of these first two goals, the intended use becomes relevant.
This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
Because of this, this package must not add unnecessary dependencies to that API.
Ideally, it adds none.
It also needs to operate efficiently.

Finally, this module is designed to be user-friendly for Go development.
It hides complexity in order to provide simpler APIs when the previous goals can all still be met.
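A rough standalone illustration of what "serializable into JSON encoded OTLP" means in practice. The span struct below is a local stand-in, not this module's API; the vendored internal/telemetry package later in this commit implements the real encoding with hex-encoded IDs and camelCase OTLP/JSON field names.

package main

import (
	"encoding/json"
	"fmt"
)

// span is a stand-in carrying only the OTLP/JSON field names of interest.
type span struct {
	TraceID string `json:"traceId,omitempty"`
	SpanID  string `json:"spanId,omitempty"`
	Name    string `json:"name"`
}

func main() {
	b, _ := json.Marshal(span{
		TraceID: "5b8efff798038103d269b633813fc60c",
		SpanID:  "eee19b7ec3c1b174",
		Name:    "example-operation",
	})
	fmt.Println(string(b))
	// {"traceId":"5b8efff798038103d269b633813fc60c","spanId":"eee19b7ec3c1b174","name":"example-operation"}
}
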
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
|
@ -0,0 +1,15 @@
# Versioning

This document describes the versioning policy for this module.
This policy is designed so the following goals can be achieved.

**Users are provided a codebase of value that is stable and secure.**

## Policy

* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules).
* [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used.
* Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
* Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path.
* GitHub releases will be made for all releases.
@ -0,0 +1,14 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

/*
Package sdk provides an auto-instrumentable OpenTelemetry SDK.

A [go.opentelemetry.io/auto.Instrumentation] can be configured to target the
process running this SDK. In that case, all telemetry the SDK produces will be
processed and handled by that [go.opentelemetry.io/auto.Instrumentation].

By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to
auto-instrument the SDK, the SDK will not generate any telemetry.
*/
package sdk
@ -0,0 +1,58 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package telemetry

// Attr is a key-value pair.
type Attr struct {
	Key   string `json:"key,omitempty"`
	Value Value  `json:"value,omitempty"`
}

// String returns an Attr for a string value.
func String(key, value string) Attr {
	return Attr{key, StringValue(value)}
}

// Int64 returns an Attr for an int64 value.
func Int64(key string, value int64) Attr {
	return Attr{key, Int64Value(value)}
}

// Int returns an Attr for an int value.
func Int(key string, value int) Attr {
	return Int64(key, int64(value))
}

// Float64 returns an Attr for a float64 value.
func Float64(key string, value float64) Attr {
	return Attr{key, Float64Value(value)}
}

// Bool returns an Attr for a bool value.
func Bool(key string, value bool) Attr {
	return Attr{key, BoolValue(value)}
}

// Bytes returns an Attr for a []byte value.
// The passed slice must not be changed after it is passed.
func Bytes(key string, value []byte) Attr {
	return Attr{key, BytesValue(value)}
}

// Slice returns an Attr for a []Value value.
// The passed slice must not be changed after it is passed.
func Slice(key string, value ...Value) Attr {
	return Attr{key, SliceValue(value...)}
}

// Map returns an Attr for a map value.
// The passed slice must not be changed after it is passed.
func Map(key string, value ...Attr) Attr {
	return Attr{key, MapValue(value...)}
}

// Equal reports whether a is equal to b.
func (a Attr) Equal(b Attr) bool {
	return a.Key == b.Key && a.Value.Equal(b.Value)
}
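A small usage sketch of the constructors above, written as if it sat in the same package (the package is internal to the vendored module, so external code cannot import it directly; the attribute names are illustrative only):

package telemetry

// exampleAttrs shows the constructor helpers producing OTLP-style attributes.
func exampleAttrs() []Attr {
	return []Attr{
		String("service.name", "checkout"),
		Int("retry.count", 3),
		Bool("cache.hit", true),
		Map("request", String("method", "GET"), Int64("size", 512)),
	}
}
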
@ -0,0 +1,8 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

/*
Package telemetry provides a lightweight representation of OpenTelemetry
telemetry that is compatible with the OTLP JSON protobuf encoding.
*/
package telemetry
@ -0,0 +1,103 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package telemetry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
traceIDSize = 16
|
||||||
|
spanIDSize = 8
|
||||||
|
)
|
||||||
|
|
||||||
|
// TraceID is a custom data type that is used for all trace IDs.
|
||||||
|
type TraceID [traceIDSize]byte
|
||||||
|
|
||||||
|
// String returns the hex string representation form of a TraceID.
|
||||||
|
func (tid TraceID) String() string {
|
||||||
|
return hex.EncodeToString(tid[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEmpty reports whether the trace ID contains only zero bytes.
|
||||||
|
func (tid TraceID) IsEmpty() bool {
|
||||||
|
return tid == [traceIDSize]byte{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
|
||||||
|
func (tid TraceID) MarshalJSON() ([]byte, error) {
|
||||||
|
if tid.IsEmpty() {
|
||||||
|
return []byte(`""`), nil
|
||||||
|
}
|
||||||
|
return marshalJSON(tid[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
|
||||||
|
// quotes.
|
||||||
|
func (tid *TraceID) UnmarshalJSON(data []byte) error {
|
||||||
|
*tid = [traceIDSize]byte{}
|
||||||
|
return unmarshalJSON(tid[:], data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanID is a custom data type that is used for all span IDs.
|
||||||
|
type SpanID [spanIDSize]byte
|
||||||
|
|
||||||
|
// String returns the hex string representation form of a SpanID.
|
||||||
|
func (sid SpanID) String() string {
|
||||||
|
return hex.EncodeToString(sid[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEmpty reports whether the span ID contains only zero bytes.
|
||||||
|
func (sid SpanID) IsEmpty() bool {
|
||||||
|
return sid == [spanIDSize]byte{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON converts span ID into a hex string enclosed in quotes.
|
||||||
|
func (sid SpanID) MarshalJSON() ([]byte, error) {
|
||||||
|
if sid.IsEmpty() {
|
||||||
|
return []byte(`""`), nil
|
||||||
|
}
|
||||||
|
return marshalJSON(sid[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
|
||||||
|
func (sid *SpanID) UnmarshalJSON(data []byte) error {
|
||||||
|
*sid = [spanIDSize]byte{}
|
||||||
|
return unmarshalJSON(sid[:], data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalJSON converts id into a hex string enclosed in quotes.
|
||||||
|
func marshalJSON(id []byte) ([]byte, error) {
|
||||||
|
// Plus 2 quote chars at the start and end.
|
||||||
|
hexLen := hex.EncodedLen(len(id)) + 2
|
||||||
|
|
||||||
|
b := make([]byte, hexLen)
|
||||||
|
hex.Encode(b[1:hexLen-1], id)
|
||||||
|
b[0], b[hexLen-1] = '"', '"'
|
||||||
|
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
|
||||||
|
func unmarshalJSON(dst []byte, src []byte) error {
|
||||||
|
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
|
||||||
|
src = src[1 : l-1]
|
||||||
|
}
|
||||||
|
nLen := len(src)
|
||||||
|
if nLen == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(dst) != hex.DecodedLen(nLen) {
|
||||||
|
return errors.New("invalid length for ID")
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := hex.Decode(dst, src)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
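A standalone sketch of the quoting and hex scheme used by marshalJSON above (not the vendored code itself): an ID is emitted as a double-quoted lowercase hex string, and an all-zero ID collapses to an empty string.

package main

import (
	"encoding/hex"
	"fmt"
)

func marshalID(id []byte) []byte {
	allZero := true
	for _, b := range id {
		if b != 0 {
			allZero = false
			break
		}
	}
	if allZero {
		// Empty IDs serialize as the empty string in OTLP/JSON.
		return []byte(`""`)
	}
	// Two extra bytes hold the surrounding quote characters.
	out := make([]byte, hex.EncodedLen(len(id))+2)
	hex.Encode(out[1:len(out)-1], id)
	out[0], out[len(out)-1] = '"', '"'
	return out
}

func main() {
	spanID := []byte{0xee, 0xe1, 0x9b, 0x7e, 0xc3, 0xc1, 0xb1, 0x74}
	fmt.Println(string(marshalID(spanID)))          // "eee19b7ec3c1b174"
	fmt.Println(string(marshalID(make([]byte, 8)))) // ""
}
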
@ -0,0 +1,67 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package telemetry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// protoInt64 represents the protobuf encoding of integers which can be either
|
||||||
|
// strings or integers.
|
||||||
|
type protoInt64 int64
|
||||||
|
|
||||||
|
// Int64 returns the protoInt64 as an int64.
|
||||||
|
func (i *protoInt64) Int64() int64 { return int64(*i) }
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes both strings and integers.
|
||||||
|
func (i *protoInt64) UnmarshalJSON(data []byte) error {
|
||||||
|
if data[0] == '"' {
|
||||||
|
var str string
|
||||||
|
if err := json.Unmarshal(data, &str); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
parsedInt, err := strconv.ParseInt(str, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*i = protoInt64(parsedInt)
|
||||||
|
} else {
|
||||||
|
var parsedInt int64
|
||||||
|
if err := json.Unmarshal(data, &parsedInt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*i = protoInt64(parsedInt)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// protoUint64 represents the protobuf encoding of integers which can be either
|
||||||
|
// strings or integers.
|
||||||
|
type protoUint64 uint64
|
||||||
|
|
||||||
|
// Uint64 returns the protoUint64 as a uint64.
|
||||||
|
func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes both strings and integers.
|
||||||
|
func (i *protoUint64) UnmarshalJSON(data []byte) error {
|
||||||
|
if data[0] == '"' {
|
||||||
|
var str string
|
||||||
|
if err := json.Unmarshal(data, &str); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
parsedUint, err := strconv.ParseUint(str, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*i = protoUint64(parsedUint)
|
||||||
|
} else {
|
||||||
|
var parsedUint uint64
|
||||||
|
if err := json.Unmarshal(data, &parsedUint); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*i = protoUint64(parsedUint)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
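A standalone sketch of the "string or integer" rule these types implement (not the vendored type): OTLP/JSON encodes 64-bit integers as JSON strings, but plain JSON numbers are accepted as well.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// flexInt64 accepts either a quoted decimal string or a bare JSON number.
type flexInt64 int64

func (i *flexInt64) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && data[0] == '"' {
		var s string
		if err := json.Unmarshal(data, &s); err != nil {
			return err
		}
		v, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return err
		}
		*i = flexInt64(v)
		return nil
	}
	var v int64
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	*i = flexInt64(v)
	return nil
}

func main() {
	var a, b flexInt64
	_ = json.Unmarshal([]byte(`"1700000000000000000"`), &a) // string form
	_ = json.Unmarshal([]byte(`42`), &b)                    // number form
	fmt.Println(a, b) // 1700000000000000000 42
}
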
66 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go (generated, vendored, new file)
|
|
@ -0,0 +1,66 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package telemetry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Resource information.
|
||||||
|
type Resource struct {
|
||||||
|
// Attrs are the set of attributes that describe the resource. Attribute
|
||||||
|
// keys MUST be unique (it is not allowed to have more than one attribute
|
||||||
|
// with the same key).
|
||||||
|
Attrs []Attr `json:"attributes,omitempty"`
|
||||||
|
// DroppedAttrs is the number of dropped attributes. If the value
|
||||||
|
// is 0, then no attributes were dropped.
|
||||||
|
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
|
||||||
|
func (r *Resource) UnmarshalJSON(data []byte) error {
|
||||||
|
decoder := json.NewDecoder(bytes.NewReader(data))
|
||||||
|
|
||||||
|
t, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if t != json.Delim('{') {
|
||||||
|
return errors.New("invalid Resource type")
|
||||||
|
}
|
||||||
|
|
||||||
|
for decoder.More() {
|
||||||
|
keyIface, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
// Empty.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := keyIface.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("invalid Resource field: %#v", keyIface)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch key {
|
||||||
|
case "attributes":
|
||||||
|
err = decoder.Decode(&r.Attrs)
|
||||||
|
case "droppedAttributesCount", "dropped_attributes_count":
|
||||||
|
err = decoder.Decode(&r.DroppedAttrs)
|
||||||
|
default:
|
||||||
|
// Skip unknown.
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
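A standalone sketch of the hand-rolled token loop these UnmarshalJSON methods share (not the vendored decoder): walk the object with json.Decoder so both the camelCase and snake_case spellings of a key are accepted, and skip anything unknown. Only droppedAttributesCount is handled here.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func droppedCount(data []byte) (uint32, error) {
	dec := json.NewDecoder(bytes.NewReader(data))
	if _, err := dec.Token(); err != nil { // consume the opening '{'
		return 0, err
	}
	var dropped uint32
	for dec.More() {
		keyTok, err := dec.Token()
		if err != nil {
			return 0, err
		}
		key, ok := keyTok.(string)
		if !ok {
			return 0, fmt.Errorf("invalid field: %#v", keyTok)
		}
		switch key {
		case "droppedAttributesCount", "dropped_attributes_count":
			if err := dec.Decode(&dropped); err != nil {
				return 0, err
			}
		default:
			// Skip the unknown field's value so the loop stays aligned on keys.
			var skip json.RawMessage
			if err := dec.Decode(&skip); err != nil {
				return 0, err
			}
		}
	}
	return dropped, nil
}

func main() {
	a, _ := droppedCount([]byte(`{"droppedAttributesCount":3}`))
	b, _ := droppedCount([]byte(`{"dropped_attributes_count":3,"other":[1,2]}`))
	fmt.Println(a, b) // 3 3
}
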
@ -0,0 +1,67 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package telemetry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Scope is the identifying values of the instrumentation scope.
|
||||||
|
type Scope struct {
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
Version string `json:"version,omitempty"`
|
||||||
|
Attrs []Attr `json:"attributes,omitempty"`
|
||||||
|
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
|
||||||
|
func (s *Scope) UnmarshalJSON(data []byte) error {
|
||||||
|
decoder := json.NewDecoder(bytes.NewReader(data))
|
||||||
|
|
||||||
|
t, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if t != json.Delim('{') {
|
||||||
|
return errors.New("invalid Scope type")
|
||||||
|
}
|
||||||
|
|
||||||
|
for decoder.More() {
|
||||||
|
keyIface, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
// Empty.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := keyIface.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("invalid Scope field: %#v", keyIface)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch key {
|
||||||
|
case "name":
|
||||||
|
err = decoder.Decode(&s.Name)
|
||||||
|
case "version":
|
||||||
|
err = decoder.Decode(&s.Version)
|
||||||
|
case "attributes":
|
||||||
|
err = decoder.Decode(&s.Attrs)
|
||||||
|
case "droppedAttributesCount", "dropped_attributes_count":
|
||||||
|
err = decoder.Decode(&s.DroppedAttrs)
|
||||||
|
default:
|
||||||
|
// Skip unknown.
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,456 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package telemetry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Span represents a single operation performed by a single component of the
|
||||||
|
// system.
|
||||||
|
type Span struct {
|
||||||
|
// A unique identifier for a trace. All spans from the same trace share
|
||||||
|
// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
|
||||||
|
// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
|
||||||
|
// is zero-length and thus is also invalid).
|
||||||
|
//
|
||||||
|
// This field is required.
|
||||||
|
TraceID TraceID `json:"traceId,omitempty"`
|
||||||
|
// A unique identifier for a span within a trace, assigned when the span
|
||||||
|
// is created. The ID is an 8-byte array. An ID with all zeroes OR of length
|
||||||
|
// other than 8 bytes is considered invalid (empty string in OTLP/JSON
|
||||||
|
// is zero-length and thus is also invalid).
|
||||||
|
//
|
||||||
|
// This field is required.
|
||||||
|
SpanID SpanID `json:"spanId,omitempty"`
|
||||||
|
// trace_state conveys information about request position in multiple distributed tracing graphs.
|
||||||
|
// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
|
||||||
|
// See also https://github.com/w3c/distributed-tracing for more details about this field.
|
||||||
|
TraceState string `json:"traceState,omitempty"`
|
||||||
|
// The `span_id` of this span's parent span. If this is a root span, then this
|
||||||
|
// field must be empty. The ID is an 8-byte array.
|
||||||
|
ParentSpanID SpanID `json:"parentSpanId,omitempty"`
|
||||||
|
// Flags, a bit field.
|
||||||
|
//
|
||||||
|
// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
|
||||||
|
// Context specification. To read the 8-bit W3C trace flag, use
|
||||||
|
// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
|
||||||
|
//
|
||||||
|
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
|
||||||
|
//
|
||||||
|
// Bits 8 and 9 represent the 3 states of whether a span's parent
|
||||||
|
// is remote. The states are (unknown, is not remote, is remote).
|
||||||
|
// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
|
||||||
|
// To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
|
||||||
|
//
|
||||||
|
// When creating span messages, if the message is logically forwarded from another source
|
||||||
|
// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
|
||||||
|
// be copied as-is. If creating from a source that does not have an equivalent flags field
|
||||||
|
// (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
|
||||||
|
// be set to zero.
|
||||||
|
// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
|
||||||
|
//
|
||||||
|
// [Optional].
|
||||||
|
Flags uint32 `json:"flags,omitempty"`
|
||||||
|
// A description of the span's operation.
|
||||||
|
//
|
||||||
|
// For example, the name can be a qualified method name or a file name
|
||||||
|
// and a line number where the operation is called. A best practice is to use
|
||||||
|
// the same display name at the same call point in an application.
|
||||||
|
// This makes it easier to correlate spans in different traces.
|
||||||
|
//
|
||||||
|
// This field is semantically required to be set to non-empty string.
|
||||||
|
// Empty value is equivalent to an unknown span name.
|
||||||
|
//
|
||||||
|
// This field is required.
|
||||||
|
Name string `json:"name"`
|
||||||
|
// Distinguishes between spans generated in a particular context. For example,
|
||||||
|
// two spans with the same name may be distinguished using `CLIENT` (caller)
|
||||||
|
// and `SERVER` (callee) to identify queueing latency associated with the span.
|
||||||
|
Kind SpanKind `json:"kind,omitempty"`
|
||||||
|
// start_time_unix_nano is the start time of the span. On the client side, this is the time
|
||||||
|
// kept by the local machine where the span execution starts. On the server side, this
|
||||||
|
// is the time when the server's application handler starts running.
|
||||||
|
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
|
||||||
|
//
|
||||||
|
// This field is semantically required and it is expected that end_time >= start_time.
|
||||||
|
StartTime time.Time `json:"startTimeUnixNano,omitempty"`
|
||||||
|
// end_time_unix_nano is the end time of the span. On the client side, this is the time
|
||||||
|
// kept by the local machine where the span execution ends. On the server side, this
|
||||||
|
// is the time when the server application handler stops running.
|
||||||
|
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
|
||||||
|
//
|
||||||
|
// This field is semantically required and it is expected that end_time >= start_time.
|
||||||
|
EndTime time.Time `json:"endTimeUnixNano,omitempty"`
|
||||||
|
// attributes is a collection of key/value pairs. Note, global attributes
|
||||||
|
// like server name can be set using the resource API. Examples of attributes:
|
||||||
|
//
|
||||||
|
// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
|
||||||
|
// "/http/server_latency": 300
|
||||||
|
// "example.com/myattribute": true
|
||||||
|
// "example.com/score": 10.239
|
||||||
|
//
|
||||||
|
// The OpenTelemetry API specification further restricts the allowed value types:
|
||||||
|
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
|
||||||
|
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||||
|
// attribute with the same key).
|
||||||
|
Attrs []Attr `json:"attributes,omitempty"`
|
||||||
|
// dropped_attributes_count is the number of attributes that were discarded. Attributes
|
||||||
|
// can be discarded because their keys are too long or because there are too many
|
||||||
|
// attributes. If this value is 0, then no attributes were dropped.
|
||||||
|
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
|
||||||
|
// events is a collection of Event items.
|
||||||
|
Events []*SpanEvent `json:"events,omitempty"`
|
||||||
|
// dropped_events_count is the number of dropped events. If the value is 0, then no
|
||||||
|
// events were dropped.
|
||||||
|
DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
|
||||||
|
// links is a collection of Links, which are references from this span to a span
|
||||||
|
// in the same or different trace.
|
||||||
|
Links []*SpanLink `json:"links,omitempty"`
|
||||||
|
// dropped_links_count is the number of dropped links after the maximum size was
|
||||||
|
// enforced. If this value is 0, then no links were dropped.
|
||||||
|
DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
|
||||||
|
// An optional final status for this span. Semantically when Status isn't set, it means
|
||||||
|
// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
|
||||||
|
Status *Status `json:"status,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON encodes s into OTLP formatted JSON.
|
||||||
|
func (s Span) MarshalJSON() ([]byte, error) {
|
||||||
|
startT := s.StartTime.UnixNano()
|
||||||
|
if s.StartTime.IsZero() || startT < 0 {
|
||||||
|
startT = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
endT := s.EndTime.UnixNano()
|
||||||
|
if s.EndTime.IsZero() || endT < 0 {
|
||||||
|
endT = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Override non-empty default SpanID marshal and omitempty.
|
||||||
|
var parentSpanId string
|
||||||
|
if !s.ParentSpanID.IsEmpty() {
|
||||||
|
b := make([]byte, hex.EncodedLen(spanIDSize))
|
||||||
|
hex.Encode(b, s.ParentSpanID[:])
|
||||||
|
parentSpanId = string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Alias Span
|
||||||
|
return json.Marshal(struct {
|
||||||
|
Alias
|
||||||
|
ParentSpanID string `json:"parentSpanId,omitempty"`
|
||||||
|
StartTime uint64 `json:"startTimeUnixNano,omitempty"`
|
||||||
|
EndTime uint64 `json:"endTimeUnixNano,omitempty"`
|
||||||
|
}{
|
||||||
|
Alias: Alias(s),
|
||||||
|
ParentSpanID: parentSpanId,
|
||||||
|
StartTime: uint64(startT),
|
||||||
|
EndTime: uint64(endT),
|
||||||
|
})
|
||||||
|
}
|
||||||
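// A standalone sketch (not this package's code) of the alias-embedding trick
// Span.MarshalJSON uses above: a locally declared type drops the custom
// marshaler so the struct can be embedded without recursion, and selected
// fields (here the timestamp) are overridden in the wrapper struct.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type event struct {
	Name string    `json:"name"`
	Time time.Time `json:"timeUnixNano,omitempty"`
}

func (e event) MarshalJSON() ([]byte, error) {
	t := e.Time.UnixNano()
	if e.Time.IsZero() || t < 0 {
		t = 0
	}
	type Alias event // Alias has no MarshalJSON method, avoiding infinite recursion.
	return json.Marshal(struct {
		Alias
		Time uint64 `json:"timeUnixNano,omitempty"` // shallower field wins the tag conflict
	}{Alias: Alias(e), Time: uint64(t)})
}

func main() {
	b, _ := json.Marshal(event{Name: "acquired", Time: time.Unix(0, 1700000000000000000)})
	fmt.Println(string(b)) // {"name":"acquired","timeUnixNano":1700000000000000000}
}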
|
|
||||||
|
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
|
||||||
|
func (s *Span) UnmarshalJSON(data []byte) error {
|
||||||
|
decoder := json.NewDecoder(bytes.NewReader(data))
|
||||||
|
|
||||||
|
t, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if t != json.Delim('{') {
|
||||||
|
return errors.New("invalid Span type")
|
||||||
|
}
|
||||||
|
|
||||||
|
for decoder.More() {
|
||||||
|
keyIface, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
// Empty.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := keyIface.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("invalid Span field: %#v", keyIface)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch key {
|
||||||
|
case "traceId", "trace_id":
|
||||||
|
err = decoder.Decode(&s.TraceID)
|
||||||
|
case "spanId", "span_id":
|
||||||
|
err = decoder.Decode(&s.SpanID)
|
||||||
|
case "traceState", "trace_state":
|
||||||
|
err = decoder.Decode(&s.TraceState)
|
||||||
|
case "parentSpanId", "parent_span_id":
|
||||||
|
err = decoder.Decode(&s.ParentSpanID)
|
||||||
|
case "flags":
|
||||||
|
err = decoder.Decode(&s.Flags)
|
||||||
|
case "name":
|
||||||
|
err = decoder.Decode(&s.Name)
|
||||||
|
case "kind":
|
||||||
|
err = decoder.Decode(&s.Kind)
|
||||||
|
case "startTimeUnixNano", "start_time_unix_nano":
|
||||||
|
var val protoUint64
|
||||||
|
err = decoder.Decode(&val)
|
||||||
|
s.StartTime = time.Unix(0, int64(val.Uint64()))
|
||||||
|
case "endTimeUnixNano", "end_time_unix_nano":
|
||||||
|
var val protoUint64
|
||||||
|
err = decoder.Decode(&val)
|
||||||
|
s.EndTime = time.Unix(0, int64(val.Uint64()))
|
||||||
|
case "attributes":
|
||||||
|
err = decoder.Decode(&s.Attrs)
|
||||||
|
case "droppedAttributesCount", "dropped_attributes_count":
|
||||||
|
err = decoder.Decode(&s.DroppedAttrs)
|
||||||
|
case "events":
|
||||||
|
err = decoder.Decode(&s.Events)
|
||||||
|
case "droppedEventsCount", "dropped_events_count":
|
||||||
|
err = decoder.Decode(&s.DroppedEvents)
|
||||||
|
case "links":
|
||||||
|
err = decoder.Decode(&s.Links)
|
||||||
|
case "droppedLinksCount", "dropped_links_count":
|
||||||
|
err = decoder.Decode(&s.DroppedLinks)
|
||||||
|
case "status":
|
||||||
|
err = decoder.Decode(&s.Status)
|
||||||
|
default:
|
||||||
|
// Skip unknown.
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanFlags represents constants used to interpret the
|
||||||
|
// Span.flags field, which is protobuf 'fixed32' type and is to
|
||||||
|
// be used as bit-fields. Each non-zero value defined in this enum is
|
||||||
|
// a bit-mask. To extract the bit-field, for example, use an
|
||||||
|
// expression like:
|
||||||
|
//
|
||||||
|
// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
|
||||||
|
//
|
||||||
|
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
|
||||||
|
//
|
||||||
|
// Note that Span flags were introduced in version 1.1 of the
|
||||||
|
// OpenTelemetry protocol. Older Span producers do not set this
|
||||||
|
// field, consequently consumers should not rely on the absence of a
|
||||||
|
// particular flag bit to indicate the presence of a particular feature.
|
||||||
|
type SpanFlags int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Bits 0-7 are used for trace flags.
|
||||||
|
SpanFlagsTraceFlagsMask SpanFlags = 255
|
||||||
|
// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
|
||||||
|
// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
|
||||||
|
// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
|
||||||
|
SpanFlagsContextHasIsRemoteMask SpanFlags = 256
|
||||||
|
// SpanFlagsContextIsRemoteMask indicates the Span or link is remote.
|
||||||
|
SpanFlagsContextIsRemoteMask SpanFlags = 512
|
||||||
|
)
|
||||||
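// A standalone sketch of reading the flag bits defined above, using local
// constants with the same values: the low 8 bits carry the W3C trace flags,
// bit 8 says whether the remote state is known, and bit 9 says whether the
// parent or link is remote.
package main

import "fmt"

const (
	traceFlagsMask  = 255
	hasIsRemoteMask = 256
	isRemoteMask    = 512
)

func main() {
	flags := uint32(0x0301) // sampled, remote state known, remote
	fmt.Println("trace flags:", flags&traceFlagsMask)     // 1
	fmt.Println("remote known:", flags&hasIsRemoteMask != 0) // true
	fmt.Println("is remote:", flags&isRemoteMask != 0)       // true
}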
|
|
||||||
|
// SpanKind is the type of span. Can be used to specify additional relationships between spans
|
||||||
|
// in addition to a parent/child relationship.
|
||||||
|
type SpanKind int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Indicates that the span represents an internal operation within an application,
|
||||||
|
// as opposed to an operation happening at the boundaries. Default value.
|
||||||
|
SpanKindInternal SpanKind = 1
|
||||||
|
// Indicates that the span covers server-side handling of an RPC or other
|
||||||
|
// remote network request.
|
||||||
|
SpanKindServer SpanKind = 2
|
||||||
|
// Indicates that the span describes a request to some remote service.
|
||||||
|
SpanKindClient SpanKind = 3
|
||||||
|
// Indicates that the span describes a producer sending a message to a broker.
|
||||||
|
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
|
||||||
|
// between producer and consumer spans. A PRODUCER span ends when the message was accepted
|
||||||
|
// by the broker while the logical processing of the message might span a much longer time.
|
||||||
|
SpanKindProducer SpanKind = 4
|
||||||
|
// Indicates that the span describes consumer receiving a message from a broker.
|
||||||
|
// Like the PRODUCER kind, there is often no direct critical path latency relationship
|
||||||
|
// between producer and consumer spans.
|
||||||
|
SpanKindConsumer SpanKind = 5
|
||||||
|
)
|
||||||
|
|
||||||
|
// Event is a time-stamped annotation of the span, consisting of user-supplied
|
||||||
|
// text description and key-value pairs.
|
||||||
|
type SpanEvent struct {
|
||||||
|
// time_unix_nano is the time the event occurred.
|
||||||
|
Time time.Time `json:"timeUnixNano,omitempty"`
|
||||||
|
// name of the event.
|
||||||
|
// This field is semantically required to be set to non-empty string.
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
// attributes is a collection of attribute key/value pairs on the event.
|
||||||
|
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||||
|
// attribute with the same key).
|
||||||
|
Attrs []Attr `json:"attributes,omitempty"`
|
||||||
|
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
|
||||||
|
// then no attributes were dropped.
|
||||||
|
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON encodes e into OTLP formatted JSON.
|
||||||
|
func (e SpanEvent) MarshalJSON() ([]byte, error) {
|
||||||
|
t := e.Time.UnixNano()
|
||||||
|
if e.Time.IsZero() || t < 0 {
|
||||||
|
t = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type Alias SpanEvent
|
||||||
|
return json.Marshal(struct {
|
||||||
|
Alias
|
||||||
|
Time uint64 `json:"timeUnixNano,omitempty"`
|
||||||
|
}{
|
||||||
|
Alias: Alias(e),
|
||||||
|
Time: uint64(t),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
|
||||||
|
func (se *SpanEvent) UnmarshalJSON(data []byte) error {
|
||||||
|
decoder := json.NewDecoder(bytes.NewReader(data))
|
||||||
|
|
||||||
|
t, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if t != json.Delim('{') {
|
||||||
|
return errors.New("invalid SpanEvent type")
|
||||||
|
}
|
||||||
|
|
||||||
|
for decoder.More() {
|
||||||
|
keyIface, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
// Empty.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := keyIface.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch key {
|
||||||
|
case "timeUnixNano", "time_unix_nano":
|
||||||
|
var val protoUint64
|
||||||
|
err = decoder.Decode(&val)
|
||||||
|
se.Time = time.Unix(0, int64(val.Uint64()))
|
||||||
|
case "name":
|
||||||
|
err = decoder.Decode(&se.Name)
|
||||||
|
case "attributes":
|
||||||
|
err = decoder.Decode(&se.Attrs)
|
||||||
|
case "droppedAttributesCount", "dropped_attributes_count":
|
||||||
|
err = decoder.Decode(&se.DroppedAttrs)
|
||||||
|
default:
|
||||||
|
// Skip unknown.
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// A pointer from the current span to another span in the same trace or in a
|
||||||
|
// different trace. For example, this can be used in batching operations,
|
||||||
|
// where a single batch handler processes multiple requests from different
|
||||||
|
// traces or when the handler receives a request from a different project.
|
||||||
|
type SpanLink struct {
|
||||||
|
// A unique identifier of a trace that this linked span is part of. The ID is a
|
||||||
|
// 16-byte array.
|
||||||
|
TraceID TraceID `json:"traceId,omitempty"`
|
||||||
|
// A unique identifier for the linked span. The ID is an 8-byte array.
|
||||||
|
SpanID SpanID `json:"spanId,omitempty"`
|
||||||
|
// The trace_state associated with the link.
|
||||||
|
TraceState string `json:"traceState,omitempty"`
|
||||||
|
// attributes is a collection of attribute key/value pairs on the link.
|
||||||
|
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||||
|
// attribute with the same key).
|
||||||
|
Attrs []Attr `json:"attributes,omitempty"`
|
||||||
|
// dropped_attributes_count is the number of dropped attributes. If the value is 0,
|
||||||
|
// then no attributes were dropped.
|
||||||
|
DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
|
||||||
|
// Flags, a bit field.
|
||||||
|
//
|
||||||
|
// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
|
||||||
|
// Context specification. To read the 8-bit W3C trace flag, use
|
||||||
|
// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
|
||||||
|
//
|
||||||
|
// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
|
||||||
|
//
|
||||||
|
// Bits 8 and 9 represent the 3 states of whether the link is remote.
|
||||||
|
// The states are (unknown, is not remote, is remote).
|
||||||
|
// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
|
||||||
|
// To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
|
||||||
|
//
|
||||||
|
// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
|
||||||
|
// When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
|
||||||
|
//
|
||||||
|
// [Optional].
|
||||||
|
Flags uint32 `json:"flags,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
|
||||||
|
func (sl *SpanLink) UnmarshalJSON(data []byte) error {
|
||||||
|
decoder := json.NewDecoder(bytes.NewReader(data))
|
||||||
|
|
||||||
|
t, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if t != json.Delim('{') {
|
||||||
|
return errors.New("invalid SpanLink type")
|
||||||
|
}
|
||||||
|
|
||||||
|
for decoder.More() {
|
||||||
|
keyIface, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
// Empty.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := keyIface.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch key {
|
||||||
|
case "traceId", "trace_id":
|
||||||
|
err = decoder.Decode(&sl.TraceID)
|
||||||
|
case "spanId", "span_id":
|
||||||
|
err = decoder.Decode(&sl.SpanID)
|
||||||
|
case "traceState", "trace_state":
|
||||||
|
err = decoder.Decode(&sl.TraceState)
|
||||||
|
case "attributes":
|
||||||
|
err = decoder.Decode(&sl.Attrs)
|
||||||
|
case "droppedAttributesCount", "dropped_attributes_count":
|
||||||
|
err = decoder.Decode(&sl.DroppedAttrs)
|
||||||
|
case "flags":
|
||||||
|
err = decoder.Decode(&sl.Flags)
|
||||||
|
default:
|
||||||
|
// Skip unknown.
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,40 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
// SPDX-License-Identifier: Apache-2.0
|
||||||
|
|
||||||
|
package telemetry
|
||||||
|
|
||||||
|
// For the semantics of status codes see
|
||||||
|
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
|
||||||
|
type StatusCode int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// The default status.
|
||||||
|
StatusCodeUnset StatusCode = 0
|
||||||
|
// The Span has been validated by an Application developer or Operator to
|
||||||
|
// have completed successfully.
|
||||||
|
StatusCodeOK StatusCode = 1
|
||||||
|
// The Span contains an error.
|
||||||
|
StatusCodeError StatusCode = 2
|
||||||
|
)
|
||||||
|
|
||||||
|
var statusCodeStrings = []string{
|
||||||
|
"Unset",
|
||||||
|
"OK",
|
||||||
|
"Error",
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s StatusCode) String() string {
|
||||||
|
if s >= 0 && int(s) < len(statusCodeStrings) {
|
||||||
|
return statusCodeStrings[s]
|
||||||
|
}
|
||||||
|
return "<unknown telemetry.StatusCode>"
|
||||||
|
}
|
||||||
|
|
||||||
|
// The Status type defines a logical error model that is suitable for different
|
||||||
|
// programming environments, including REST APIs and RPC APIs.
|
||||||
|
type Status struct {
|
||||||
|
// A developer-facing human readable error message.
|
||||||
|
Message string `json:"message,omitempty"`
|
||||||
|
// The status code.
|
||||||
|
Code StatusCode `json:"code,omitempty"`
|
||||||
|
}
|
||||||
|
|
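A standalone sketch (with local stand-in types, not this package's API) of how this Status shape serializes: the code field is tagged omitempty, so the default Unset (0) code is simply dropped from the JSON, while OK (1) and Error (2) are written as numbers.

package main

import (
	"encoding/json"
	"fmt"
)

type status struct {
	Message string `json:"message,omitempty"`
	Code    int32  `json:"code,omitempty"`
}

func main() {
	unset, _ := json.Marshal(status{})                                      // Unset code omitted
	errored, _ := json.Marshal(status{Message: "upstream timeout", Code: 2}) // Error
	fmt.Println(string(unset))   // {}
	fmt.Println(string(errored)) // {"message":"upstream timeout","code":2}
}
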
@@ -0,0 +1,189 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package telemetry

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

// Traces represents the traces data that can be stored in a persistent storage,
// OR can be embedded by other protocols that transfer OTLP traces data but do
// not implement the OTLP protocol.
//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
type Traces struct {
	// An array of ResourceSpans.
	// For data coming from a single resource this array will typically contain
	// one element. Intermediary nodes that receive data from multiple origins
	// typically batch the data before forwarding further and in that case this
	// array will contain multiple elements.
	ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
}

// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
func (td *Traces) UnmarshalJSON(data []byte) error {
	decoder := json.NewDecoder(bytes.NewReader(data))

	t, err := decoder.Token()
	if err != nil {
		return err
	}
	if t != json.Delim('{') {
		return errors.New("invalid TracesData type")
	}

	for decoder.More() {
		keyIface, err := decoder.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Empty.
				return nil
			}
			return err
		}

		key, ok := keyIface.(string)
		if !ok {
			return fmt.Errorf("invalid TracesData field: %#v", keyIface)
		}

		switch key {
		case "resourceSpans", "resource_spans":
			err = decoder.Decode(&td.ResourceSpans)
		default:
			// Skip unknown.
		}

		if err != nil {
			return err
		}
	}
	return nil
}

// A collection of ScopeSpans from a Resource.
type ResourceSpans struct {
	// The resource for the spans in this message.
	// If this field is not set then no resource info is known.
	Resource Resource `json:"resource"`
	// A list of ScopeSpans that originate from a resource.
	ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
	// This schema_url applies to the data in the "resource" field. It does not apply
	// to the data in the "scope_spans" field which have their own schema_url field.
	SchemaURL string `json:"schemaUrl,omitempty"`
}

// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
	decoder := json.NewDecoder(bytes.NewReader(data))

	t, err := decoder.Token()
	if err != nil {
		return err
	}
	if t != json.Delim('{') {
		return errors.New("invalid ResourceSpans type")
	}

	for decoder.More() {
		keyIface, err := decoder.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Empty.
				return nil
			}
			return err
		}

		key, ok := keyIface.(string)
		if !ok {
			return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
		}

		switch key {
		case "resource":
			err = decoder.Decode(&rs.Resource)
		case "scopeSpans", "scope_spans":
			err = decoder.Decode(&rs.ScopeSpans)
		case "schemaUrl", "schema_url":
			err = decoder.Decode(&rs.SchemaURL)
		default:
			// Skip unknown.
		}

		if err != nil {
			return err
		}
	}
	return nil
}

// A collection of Spans produced by an InstrumentationScope.
type ScopeSpans struct {
	// The instrumentation scope information for the spans in this message.
	// Semantically when InstrumentationScope isn't set, it is equivalent with
	// an empty instrumentation scope name (unknown).
	Scope *Scope `json:"scope"`
	// A list of Spans that originate from an instrumentation scope.
	Spans []*Span `json:"spans,omitempty"`
	// The Schema URL, if known. This is the identifier of the Schema that the span data
	// is recorded in. To learn more about Schema URL see
	// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
	// This schema_url applies to all spans and span events in the "spans" field.
	SchemaURL string `json:"schemaUrl,omitempty"`
}

// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
	decoder := json.NewDecoder(bytes.NewReader(data))

	t, err := decoder.Token()
	if err != nil {
		return err
	}
	if t != json.Delim('{') {
		return errors.New("invalid ScopeSpans type")
	}

	for decoder.More() {
		keyIface, err := decoder.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Empty.
				return nil
			}
			return err
		}

		key, ok := keyIface.(string)
		if !ok {
			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
		}

		switch key {
		case "scope":
			err = decoder.Decode(&ss.Scope)
		case "spans":
			err = decoder.Decode(&ss.Spans)
		case "schemaUrl", "schema_url":
			err = decoder.Decode(&ss.SchemaURL)
		default:
			// Skip unknown.
		}

		if err != nil {
			return err
		}
	}
	return nil
}
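Because Traces, ResourceSpans, and ScopeSpans each carry their own tolerant UnmarshalJSON, a whole OTLP/JSON payload decodes with a single json.Unmarshal call. A sketch of that (illustrative only, not part of this commit; it assumes the Span type defined elsewhere in this package exposes a Name field and its own UnmarshalJSON):

package telemetry

import (
	"encoding/json"
	"fmt"
)

// countSpansSketch is an illustrative sketch: decode a minimal OTLP/JSON
// payload into the Traces hierarchy above and report the spans found per scope.
func countSpansSketch() error {
	payload := []byte(`{
		"resourceSpans": [{
			"scopeSpans": [{
				"spans": [{"name": "GET /healthz"}],
				"schemaUrl": "https://opentelemetry.io/schemas/1.26.0"
			}]
		}]
	}`)

	var td Traces
	if err := json.Unmarshal(payload, &td); err != nil {
		return err
	}
	for _, rs := range td.ResourceSpans {
		for _, ss := range rs.ScopeSpans {
			fmt.Println(ss.SchemaURL, len(ss.Spans)) // https://opentelemetry.io/schemas/1.26.0 1
		}
	}
	return nil
}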
@@ -0,0 +1,452 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

//go:generate stringer -type=ValueKind -trimprefix=ValueKind

package telemetry

import (
	"bytes"
	"cmp"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math"
	"slices"
	"strconv"
	"unsafe"
)

// A Value represents a structured value.
// A zero value is valid and represents an empty value.
type Value struct {
	// Ensure forward compatibility by explicitly making this not comparable.
	noCmp [0]func() //nolint: unused // This is indeed used.

	// num holds the value for Int64, Float64, and Bool. It holds the length
	// for String, Bytes, Slice, Map.
	num uint64
	// any holds either the KindBool, KindInt64, KindFloat64, stringptr,
	// bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
	// then the value of Value is in num as described above. Otherwise, it
	// contains the value wrapped in the appropriate type.
	any any
}

type (
	// stringptr represents a value in Value.any for KindString Values.
	stringptr *byte
	// bytesptr represents a value in Value.any for KindBytes Values.
	bytesptr *byte
	// sliceptr represents a value in Value.any for KindSlice Values.
	sliceptr *Value
	// mapptr represents a value in Value.any for KindMap Values.
	mapptr *Attr
)

// ValueKind is the kind of a [Value].
type ValueKind int

// ValueKind values.
const (
	ValueKindEmpty ValueKind = iota
	ValueKindBool
	ValueKindFloat64
	ValueKindInt64
	ValueKindString
	ValueKindBytes
	ValueKindSlice
	ValueKindMap
)

var valueKindStrings = []string{
	"Empty",
	"Bool",
	"Float64",
	"Int64",
	"String",
	"Bytes",
	"Slice",
	"Map",
}

func (k ValueKind) String() string {
	if k >= 0 && int(k) < len(valueKindStrings) {
		return valueKindStrings[k]
	}
	return "<unknown telemetry.ValueKind>"
}

// StringValue returns a new [Value] for a string.
func StringValue(v string) Value {
	return Value{
		num: uint64(len(v)),
		any: stringptr(unsafe.StringData(v)),
	}
}

// IntValue returns a [Value] for an int.
func IntValue(v int) Value { return Int64Value(int64(v)) }

// Int64Value returns a [Value] for an int64.
func Int64Value(v int64) Value {
	return Value{num: uint64(v), any: ValueKindInt64}
}

// Float64Value returns a [Value] for a float64.
func Float64Value(v float64) Value {
	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
}

// BoolValue returns a [Value] for a bool.
func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
	var n uint64
	if v {
		n = 1
	}
	return Value{num: n, any: ValueKindBool}
}

// BytesValue returns a [Value] for a byte slice. The passed slice must not be
// changed after it is passed.
func BytesValue(v []byte) Value {
	return Value{
		num: uint64(len(v)),
		any: bytesptr(unsafe.SliceData(v)),
	}
}

// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
// not be changed after it is passed.
func SliceValue(vs ...Value) Value {
	return Value{
		num: uint64(len(vs)),
		any: sliceptr(unsafe.SliceData(vs)),
	}
}

// MapValue returns a new [Value] for a slice of key-value pairs. The passed
// slice must not be changed after it is passed.
func MapValue(kvs ...Attr) Value {
	return Value{
		num: uint64(len(kvs)),
		any: mapptr(unsafe.SliceData(kvs)),
	}
}

// AsString returns the value held by v as a string.
func (v Value) AsString() string {
	if sp, ok := v.any.(stringptr); ok {
		return unsafe.String(sp, v.num)
	}
	// TODO: error handle
	return ""
}

// asString returns the value held by v as a string. It will panic if the Value
// is not KindString.
func (v Value) asString() string {
	return unsafe.String(v.any.(stringptr), v.num)
}

// AsInt64 returns the value held by v as an int64.
func (v Value) AsInt64() int64 {
	if v.Kind() != ValueKindInt64 {
		// TODO: error handle
		return 0
	}
	return v.asInt64()
}

// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
// this will return garbage.
func (v Value) asInt64() int64 {
	// Assumes v.num was a valid int64 (overflow not checked).
	return int64(v.num) // nolint: gosec
}

// AsBool returns the value held by v as a bool.
func (v Value) AsBool() bool {
	if v.Kind() != ValueKindBool {
		// TODO: error handle
		return false
	}
	return v.asBool()
}

// asBool returns the value held by v as a bool. If v is not of KindBool, this
// will return garbage.
func (v Value) asBool() bool { return v.num == 1 }

// AsFloat64 returns the value held by v as a float64.
func (v Value) AsFloat64() float64 {
	if v.Kind() != ValueKindFloat64 {
		// TODO: error handle
		return 0
	}
	return v.asFloat64()
}

// asFloat64 returns the value held by v as a float64. If v is not of
// KindFloat64, this will return garbage.
func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }

// AsBytes returns the value held by v as a []byte.
func (v Value) AsBytes() []byte {
	if sp, ok := v.any.(bytesptr); ok {
		return unsafe.Slice((*byte)(sp), v.num)
	}
	// TODO: error handle
	return nil
}

// asBytes returns the value held by v as a []byte. It will panic if the Value
// is not KindBytes.
func (v Value) asBytes() []byte {
	return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
}

// AsSlice returns the value held by v as a []Value.
func (v Value) AsSlice() []Value {
	if sp, ok := v.any.(sliceptr); ok {
		return unsafe.Slice((*Value)(sp), v.num)
	}
	// TODO: error handle
	return nil
}

// asSlice returns the value held by v as a []Value. It will panic if the Value
// is not KindSlice.
func (v Value) asSlice() []Value {
	return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
}

// AsMap returns the value held by v as a []Attr.
func (v Value) AsMap() []Attr {
	if sp, ok := v.any.(mapptr); ok {
		return unsafe.Slice((*Attr)(sp), v.num)
	}
	// TODO: error handle
	return nil
}

// asMap returns the value held by v as a []Attr. It will panic if the
// Value is not KindMap.
func (v Value) asMap() []Attr {
	return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
}

// Kind returns the Kind of v.
func (v Value) Kind() ValueKind {
	switch x := v.any.(type) {
	case ValueKind:
		return x
	case stringptr:
		return ValueKindString
	case bytesptr:
		return ValueKindBytes
	case sliceptr:
		return ValueKindSlice
	case mapptr:
		return ValueKindMap
	default:
		return ValueKindEmpty
	}
}

// Empty returns if v does not hold any value.
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }

// Equal returns if v is equal to w.
func (v Value) Equal(w Value) bool {
	k1 := v.Kind()
	k2 := w.Kind()
	if k1 != k2 {
		return false
	}
	switch k1 {
	case ValueKindInt64, ValueKindBool:
		return v.num == w.num
	case ValueKindString:
		return v.asString() == w.asString()
	case ValueKindFloat64:
		return v.asFloat64() == w.asFloat64()
	case ValueKindSlice:
		return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
	case ValueKindMap:
		sv := sortMap(v.asMap())
		sw := sortMap(w.asMap())
		return slices.EqualFunc(sv, sw, Attr.Equal)
	case ValueKindBytes:
		return bytes.Equal(v.asBytes(), w.asBytes())
	case ValueKindEmpty:
		return true
	default:
		// TODO: error handle
		return false
	}
}

func sortMap(m []Attr) []Attr {
	sm := make([]Attr, len(m))
	copy(sm, m)
	slices.SortFunc(sm, func(a, b Attr) int {
		return cmp.Compare(a.Key, b.Key)
	})

	return sm
}

// String returns Value's value as a string, formatted like [fmt.Sprint].
//
// The returned string is meant for debugging;
// the string representation is not stable.
func (v Value) String() string {
	switch v.Kind() {
	case ValueKindString:
		return v.asString()
	case ValueKindInt64:
		// Assumes v.num was a valid int64 (overflow not checked).
		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
	case ValueKindFloat64:
		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
	case ValueKindBool:
		return strconv.FormatBool(v.asBool())
	case ValueKindBytes:
		return fmt.Sprint(v.asBytes())
	case ValueKindMap:
		return fmt.Sprint(v.asMap())
	case ValueKindSlice:
		return fmt.Sprint(v.asSlice())
	case ValueKindEmpty:
		return "<nil>"
	default:
		// Try to handle this as gracefully as possible.
		//
		// Don't panic here. The goal here is to have developers find this
		// first if a slog.Kind is not handled. It is
		// preferable to have users open an issue asking why their attributes
		// have an "unhandled: " prefix than say that their code is panicking.
		return fmt.Sprintf("<unhandled telemetry.ValueKind: %s>", v.Kind())
	}
}

// MarshalJSON encodes v into OTLP formatted JSON.
func (v *Value) MarshalJSON() ([]byte, error) {
	switch v.Kind() {
	case ValueKindString:
		return json.Marshal(struct {
			Value string `json:"stringValue"`
		}{v.asString()})
	case ValueKindInt64:
		return json.Marshal(struct {
			Value string `json:"intValue"`
		}{strconv.FormatInt(int64(v.num), 10)})
	case ValueKindFloat64:
		return json.Marshal(struct {
			Value float64 `json:"doubleValue"`
		}{v.asFloat64()})
	case ValueKindBool:
		return json.Marshal(struct {
			Value bool `json:"boolValue"`
		}{v.asBool()})
	case ValueKindBytes:
		return json.Marshal(struct {
			Value []byte `json:"bytesValue"`
		}{v.asBytes()})
	case ValueKindMap:
		return json.Marshal(struct {
			Value struct {
				Values []Attr `json:"values"`
			} `json:"kvlistValue"`
		}{struct {
			Values []Attr `json:"values"`
		}{v.asMap()}})
	case ValueKindSlice:
		return json.Marshal(struct {
			Value struct {
				Values []Value `json:"values"`
			} `json:"arrayValue"`
		}{struct {
			Values []Value `json:"values"`
		}{v.asSlice()}})
	case ValueKindEmpty:
		return nil, nil
	default:
		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
	}
}

// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
func (v *Value) UnmarshalJSON(data []byte) error {
	decoder := json.NewDecoder(bytes.NewReader(data))

	t, err := decoder.Token()
	if err != nil {
		return err
	}
	if t != json.Delim('{') {
		return errors.New("invalid Value type")
	}

	for decoder.More() {
		keyIface, err := decoder.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				// Empty.
				return nil
			}
			return err
		}

		key, ok := keyIface.(string)
		if !ok {
			return fmt.Errorf("invalid Value key: %#v", keyIface)
		}

		switch key {
		case "stringValue", "string_value":
			var val string
			err = decoder.Decode(&val)
			*v = StringValue(val)
		case "boolValue", "bool_value":
			var val bool
			err = decoder.Decode(&val)
			*v = BoolValue(val)
		case "intValue", "int_value":
			var val protoInt64
			err = decoder.Decode(&val)
			*v = Int64Value(val.Int64())
		case "doubleValue", "double_value":
			var val float64
			err = decoder.Decode(&val)
			*v = Float64Value(val)
		case "bytesValue", "bytes_value":
			var val64 string
			if err := decoder.Decode(&val64); err != nil {
				return err
			}
			var val []byte
			val, err = base64.StdEncoding.DecodeString(val64)
			*v = BytesValue(val)
		case "arrayValue", "array_value":
			var val struct{ Values []Value }
			err = decoder.Decode(&val)
			*v = SliceValue(val.Values...)
		case "kvlistValue", "kvlist_value":
			var val struct{ Values []Attr }
			err = decoder.Decode(&val)
			*v = MapValue(val.Values...)
		default:
			// Skip unknown.
			continue
		}
		// Use first valid. Ignore the rest.
		return err
	}

	// Only unknown fields. Return nil without unmarshaling any value.
	return nil
}
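The constructors above keep scalar kinds in num and point-at-payload kinds (string, bytes, slice, map) behind unexported pointer types, so a Value stays small. A short sketch of how the constructors and the JSON codec fit together (illustrative only, not part of this commit; it assumes the Attr type from this package, constructed elsewhere in the diff as telemetry.Attr{Key: ..., Value: ...}):

package telemetry

import (
	"encoding/json"
	"fmt"
)

// valueRoundTripSketch is an illustrative sketch: build a map-valued Value and
// round-trip it through the OTLP JSON codec implemented above.
func valueRoundTripSketch() error {
	v := MapValue(
		Attr{Key: "enabled", Value: BoolValue(true)},
		Attr{Key: "retries", Value: IntValue(3)},
	)

	encoded, err := json.Marshal(&v) // {"kvlistValue":{"values":[...]}}
	if err != nil {
		return err
	}

	var decoded Value
	if err := json.Unmarshal(encoded, &decoded); err != nil {
		return err
	}
	fmt.Println(decoded.Kind(), v.Equal(decoded)) // Map true
	return nil
}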
@@ -0,0 +1,94 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package sdk

import (
	"log/slog"
	"os"
	"strconv"
)

// maxSpan are the span limits resolved during startup.
var maxSpan = newSpanLimits()

type spanLimits struct {
	// Attrs is the number of allowed attributes for a span.
	//
	// This is resolved from the environment variable value for the
	// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
	// that is not set, is used.
	Attrs int
	// AttrValueLen is the maximum attribute value length allowed for a span.
	//
	// This is resolved from the environment variable value for the
	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
	// if that is not set, is used.
	AttrValueLen int
	// Events is the number of allowed events for a span.
	//
	// This is resolved from the environment variable value for the
	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
	Events int
	// EventAttrs is the number of allowed attributes for a span event.
	//
	// This is resolved from the environment variable value for the
	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
	EventAttrs int
	// Links is the number of allowed Links for a span.
	//
	// This is resolved from the environment variable value for the
	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
	Links int
	// LinkAttrs is the number of allowed attributes for a span link.
	//
	// This is resolved from the environment variable value for the
	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
	LinkAttrs int
}

func newSpanLimits() spanLimits {
	return spanLimits{
		Attrs: firstEnv(
			128,
			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
			"OTEL_ATTRIBUTE_COUNT_LIMIT",
		),
		AttrValueLen: firstEnv(
			-1, // Unlimited.
			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
		),
		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
	}
}

// firstEnv returns the parsed integer value of the first matching environment
// variable from keys. The defaultVal is returned if the value is not an
// integer or no match is found.
func firstEnv(defaultVal int, keys ...string) int {
	for _, key := range keys {
		strV := os.Getenv(key)
		if strV == "" {
			continue
		}

		v, err := strconv.Atoi(strV)
		if err == nil {
			return v
		}
		slog.Warn(
			"invalid limit environment variable",
			"error", err,
			"key", key,
			"value", strV,
		)
	}

	return defaultVal
}
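firstEnv gives the span-specific variable priority over the generic one and falls back to the default when neither key holds a valid integer. A sketch of that precedence (illustrative only, not part of this commit; it calls firstEnv directly, since maxSpan itself is resolved once at package initialization):

package sdk

import "os"

// limitPrecedenceSketch is an illustrative sketch: the span-specific key wins,
// then the generic key, then the default passed to firstEnv.
func limitPrecedenceSketch() int {
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	os.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "32")

	// Returns 32; with only OTEL_ATTRIBUTE_COUNT_LIMIT set it would return 64,
	// and with neither set it would return the default 128.
	return firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")
}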
@@ -0,0 +1,432 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package sdk

import (
	"encoding/json"
	"fmt"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"
	"unicode/utf8"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"

	"go.opentelemetry.io/auto/sdk/internal/telemetry"
)

type span struct {
	noop.Span

	spanContext trace.SpanContext
	sampled     atomic.Bool

	mu     sync.Mutex
	traces *telemetry.Traces
	span   *telemetry.Span
}

func (s *span) SpanContext() trace.SpanContext {
	if s == nil {
		return trace.SpanContext{}
	}
	// s.spanContext is immutable, do not acquire lock s.mu.
	return s.spanContext
}

func (s *span) IsRecording() bool {
	if s == nil {
		return false
	}

	return s.sampled.Load()
}

func (s *span) SetStatus(c codes.Code, msg string) {
	if s == nil || !s.sampled.Load() {
		return
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	if s.span.Status == nil {
		s.span.Status = new(telemetry.Status)
	}

	s.span.Status.Message = msg

	switch c {
	case codes.Unset:
		s.span.Status.Code = telemetry.StatusCodeUnset
	case codes.Error:
		s.span.Status.Code = telemetry.StatusCodeError
	case codes.Ok:
		s.span.Status.Code = telemetry.StatusCodeOK
	}
}

func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
	if s == nil || !s.sampled.Load() {
		return
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	limit := maxSpan.Attrs
	if limit == 0 {
		// No attributes allowed.
		s.span.DroppedAttrs += uint32(len(attrs))
		return
	}

	m := make(map[string]int)
	for i, a := range s.span.Attrs {
		m[a.Key] = i
	}

	for _, a := range attrs {
		val := convAttrValue(a.Value)
		if val.Empty() {
			s.span.DroppedAttrs++
			continue
		}

		if idx, ok := m[string(a.Key)]; ok {
			s.span.Attrs[idx] = telemetry.Attr{
				Key:   string(a.Key),
				Value: val,
			}
		} else if limit < 0 || len(s.span.Attrs) < limit {
			s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
				Key:   string(a.Key),
				Value: val,
			})
			m[string(a.Key)] = len(s.span.Attrs) - 1
		} else {
			s.span.DroppedAttrs++
		}
	}
}

// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
// number of dropped attributes is also returned.
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
	if limit == 0 {
		return nil, uint32(len(attrs))
	}

	if limit < 0 {
		// Unlimited.
		return convAttrs(attrs), 0
	}

	limit = min(len(attrs), limit)
	return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
}

func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
	if len(attrs) == 0 {
		// Avoid allocations if not necessary.
		return nil
	}

	out := make([]telemetry.Attr, 0, len(attrs))
	for _, attr := range attrs {
		key := string(attr.Key)
		val := convAttrValue(attr.Value)
		if val.Empty() {
			continue
		}
		out = append(out, telemetry.Attr{Key: key, Value: val})
	}
	return out
}

func convAttrValue(value attribute.Value) telemetry.Value {
	switch value.Type() {
	case attribute.BOOL:
		return telemetry.BoolValue(value.AsBool())
	case attribute.INT64:
		return telemetry.Int64Value(value.AsInt64())
	case attribute.FLOAT64:
		return telemetry.Float64Value(value.AsFloat64())
	case attribute.STRING:
		v := truncate(maxSpan.AttrValueLen, value.AsString())
		return telemetry.StringValue(v)
	case attribute.BOOLSLICE:
		slice := value.AsBoolSlice()
		out := make([]telemetry.Value, 0, len(slice))
		for _, v := range slice {
			out = append(out, telemetry.BoolValue(v))
		}
		return telemetry.SliceValue(out...)
	case attribute.INT64SLICE:
		slice := value.AsInt64Slice()
		out := make([]telemetry.Value, 0, len(slice))
		for _, v := range slice {
			out = append(out, telemetry.Int64Value(v))
		}
		return telemetry.SliceValue(out...)
	case attribute.FLOAT64SLICE:
		slice := value.AsFloat64Slice()
		out := make([]telemetry.Value, 0, len(slice))
		for _, v := range slice {
			out = append(out, telemetry.Float64Value(v))
		}
		return telemetry.SliceValue(out...)
	case attribute.STRINGSLICE:
		slice := value.AsStringSlice()
		out := make([]telemetry.Value, 0, len(slice))
		for _, v := range slice {
			v = truncate(maxSpan.AttrValueLen, v)
			out = append(out, telemetry.StringValue(v))
		}
		return telemetry.SliceValue(out...)
	}
	return telemetry.Value{}
}

// truncate returns a truncated version of s such that it contains less than
// the limit number of characters. Truncation is applied by returning the limit
// number of valid characters contained in s.
//
// If limit is negative, it returns the original string.
//
// UTF-8 is supported. When truncating, all invalid characters are dropped
// before applying truncation.
//
// If s already contains less than the limit number of bytes, it is returned
// unchanged. No invalid characters are removed.
func truncate(limit int, s string) string {
	// This prioritizes performance in the following order based on the most
	// common expected use-cases.
	//
	//  - Short values less than the default limit (128).
	//  - Strings with valid encodings that exceed the limit.
	//  - No limit.
	//  - Strings with invalid encodings that exceed the limit.
	if limit < 0 || len(s) <= limit {
		return s
	}

	// Optimistically, assume all valid UTF-8.
	var b strings.Builder
	count := 0
	for i, c := range s {
		if c != utf8.RuneError {
			count++
			if count > limit {
				return s[:i]
			}
			continue
		}

		_, size := utf8.DecodeRuneInString(s[i:])
		if size == 1 {
			// Invalid encoding.
			b.Grow(len(s) - 1)
			_, _ = b.WriteString(s[:i])
			s = s[i:]
			break
		}
	}

	// Fast-path, no invalid input.
	if b.Cap() == 0 {
		return s
	}

	// Truncate while validating UTF-8.
	for i := 0; i < len(s) && count < limit; {
		c := s[i]
		if c < utf8.RuneSelf {
			// Optimization for single byte runes (common case).
			_ = b.WriteByte(c)
			i++
			count++
			continue
		}

		_, size := utf8.DecodeRuneInString(s[i:])
		if size == 1 {
			// We checked for all 1-byte runes above, this is a RuneError.
			i++
			continue
		}

		_, _ = b.WriteString(s[i : i+size])
		i += size
		count++
	}

	return b.String()
}

func (s *span) End(opts ...trace.SpanEndOption) {
	if s == nil || !s.sampled.Swap(false) {
		return
	}

	// s.end exists so the lock (s.mu) is not held while s.ended is called.
	s.ended(s.end(opts))
}

func (s *span) end(opts []trace.SpanEndOption) []byte {
	s.mu.Lock()
	defer s.mu.Unlock()

	cfg := trace.NewSpanEndConfig(opts...)
	if t := cfg.Timestamp(); !t.IsZero() {
		s.span.EndTime = cfg.Timestamp()
	} else {
		s.span.EndTime = time.Now()
	}

	b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
	return b
}

// Expected to be implemented in eBPF.
//
//go:noinline
func (*span) ended(buf []byte) { ended(buf) }

// ended is used for testing.
var ended = func([]byte) {}

func (s *span) RecordError(err error, opts ...trace.EventOption) {
	if s == nil || err == nil || !s.sampled.Load() {
		return
	}

	cfg := trace.NewEventConfig(opts...)

	attrs := cfg.Attributes()
	attrs = append(attrs,
		semconv.ExceptionType(typeStr(err)),
		semconv.ExceptionMessage(err.Error()),
	)
	if cfg.StackTrace() {
		buf := make([]byte, 2048)
		n := runtime.Stack(buf, false)
		attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
}

func typeStr(i any) string {
	t := reflect.TypeOf(i)
	if t.PkgPath() == "" && t.Name() == "" {
		// Likely a builtin type.
		return t.String()
	}
	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
}

func (s *span) AddEvent(name string, opts ...trace.EventOption) {
	if s == nil || !s.sampled.Load() {
		return
	}

	cfg := trace.NewEventConfig(opts...)

	s.mu.Lock()
	defer s.mu.Unlock()

	s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
}

// addEvent adds an event with name and attrs at tStamp to the span. The span
// lock (s.mu) needs to be held by the caller.
func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
	limit := maxSpan.Events

	if limit == 0 {
		s.span.DroppedEvents++
		return
	}

	if limit > 0 && len(s.span.Events) == limit {
		// Drop head while avoiding allocation of more capacity.
		copy(s.span.Events[:limit-1], s.span.Events[1:])
		s.span.Events = s.span.Events[:limit-1]
		s.span.DroppedEvents++
	}

	e := &telemetry.SpanEvent{Time: tStamp, Name: name}
	e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)

	s.span.Events = append(s.span.Events, e)
}

func (s *span) AddLink(link trace.Link) {
	if s == nil || !s.sampled.Load() {
		return
	}

	l := maxSpan.Links

	s.mu.Lock()
	defer s.mu.Unlock()

	if l == 0 {
		s.span.DroppedLinks++
		return
	}

	if l > 0 && len(s.span.Links) == l {
		// Drop head while avoiding allocation of more capacity.
		copy(s.span.Links[:l-1], s.span.Links[1:])
		s.span.Links = s.span.Links[:l-1]
		s.span.DroppedLinks++
	}

	s.span.Links = append(s.span.Links, convLink(link))
}

func convLinks(links []trace.Link) []*telemetry.SpanLink {
	out := make([]*telemetry.SpanLink, 0, len(links))
	for _, link := range links {
		out = append(out, convLink(link))
	}
	return out
}

func convLink(link trace.Link) *telemetry.SpanLink {
	l := &telemetry.SpanLink{
		TraceID:    telemetry.TraceID(link.SpanContext.TraceID()),
		SpanID:     telemetry.SpanID(link.SpanContext.SpanID()),
		TraceState: link.SpanContext.TraceState().String(),
		Flags:      uint32(link.SpanContext.TraceFlags()),
	}
	l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)

	return l
}

func (s *span) SetName(name string) {
	if s == nil || !s.sampled.Load() {
		return
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	s.span.Name = name
}

func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() }
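The truncate helper above counts valid UTF-8 runes rather than bytes and drops invalid bytes before applying the limit, which matters for multi-byte attribute values. A small sketch of that behaviour (illustrative only, not part of this commit; it assumes it sits inside this sdk package so it can call the unexported function):

package sdk

// truncateSketch is an illustrative sketch of the behaviour documented above:
// truncate counts valid UTF-8 runes, not bytes, and drops invalid bytes before
// applying the limit.
func truncateSketch() [3]string {
	return [3]string{
		truncate(-1, "héllo"),    // "héllo": a negative limit leaves s unchanged.
		truncate(3, "héllo"),     // "hél": 3 runes kept even though "é" is 2 bytes.
		truncate(3, "h\xffello"), // "hel": the invalid 0xff byte is dropped first.
	}
}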
@@ -0,0 +1,124 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package sdk

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"

	"go.opentelemetry.io/auto/sdk/internal/telemetry"
)

type tracer struct {
	noop.Tracer

	name, schemaURL, version string
}

var _ trace.Tracer = tracer{}

func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
	var psc trace.SpanContext
	sampled := true
	span := new(span)

	// Ask eBPF for sampling decision and span context info.
	t.start(ctx, span, &psc, &sampled, &span.spanContext)

	span.sampled.Store(sampled)

	ctx = trace.ContextWithSpan(ctx, span)

	if sampled {
		// Only build traces if sampled.
		cfg := trace.NewSpanStartConfig(opts...)
		span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
	}

	return ctx, span
}

// Expected to be implemented in eBPF.
//
//go:noinline
func (t *tracer) start(
	ctx context.Context,
	spanPtr *span,
	psc *trace.SpanContext,
	sampled *bool,
	sc *trace.SpanContext,
) {
	start(ctx, spanPtr, psc, sampled, sc)
}

// start is used for testing.
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}

func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
	span := &telemetry.Span{
		TraceID:      telemetry.TraceID(sc.TraceID()),
		SpanID:       telemetry.SpanID(sc.SpanID()),
		Flags:        uint32(sc.TraceFlags()),
		TraceState:   sc.TraceState().String(),
		ParentSpanID: telemetry.SpanID(psc.SpanID()),
		Name:         name,
		Kind:         spanKind(cfg.SpanKind()),
	}

	span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())

	links := cfg.Links()
	if limit := maxSpan.Links; limit == 0 {
		span.DroppedLinks = uint32(len(links))
	} else {
		if limit > 0 {
			n := max(len(links)-limit, 0)
			span.DroppedLinks = uint32(n)
			links = links[n:]
		}
		span.Links = convLinks(links)
	}

	if t := cfg.Timestamp(); !t.IsZero() {
		span.StartTime = cfg.Timestamp()
	} else {
		span.StartTime = time.Now()
	}

	return &telemetry.Traces{
		ResourceSpans: []*telemetry.ResourceSpans{
			{
				ScopeSpans: []*telemetry.ScopeSpans{
					{
						Scope: &telemetry.Scope{
							Name:    t.name,
							Version: t.version,
						},
						Spans:     []*telemetry.Span{span},
						SchemaURL: t.schemaURL,
					},
				},
			},
		},
	}, span
}

func spanKind(kind trace.SpanKind) telemetry.SpanKind {
	switch kind {
	case trace.SpanKindInternal:
		return telemetry.SpanKindInternal
	case trace.SpanKindServer:
		return telemetry.SpanKindServer
	case trace.SpanKindClient:
		return telemetry.SpanKindClient
	case trace.SpanKindProducer:
		return telemetry.SpanKindProducer
	case trace.SpanKindConsumer:
		return telemetry.SpanKindConsumer
	}
	return telemetry.SpanKind(0) // undefined.
}
@@ -0,0 +1,33 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package sdk

import (
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

// TracerProvider returns an auto-instrumentable [trace.TracerProvider].
//
// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
// the process using the returned TracerProvider, all of the telemetry it
// produces will be processed and handled by that Instrumentation. By default,
// if no Instrumentation instruments the TracerProvider it will not generate
// any trace telemetry.
func TracerProvider() trace.TracerProvider { return tracerProviderInstance }

var tracerProviderInstance = new(tracerProvider)

type tracerProvider struct{ noop.TracerProvider }

var _ trace.TracerProvider = tracerProvider{}

func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
	cfg := trace.NewTracerConfig(opts...)
	return tracer{
		name:      name,
		version:   cfg.InstrumentationVersion(),
		schemaURL: cfg.SchemaURL(),
	}
}
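From the instrumented application's point of view the provider is used like any other trace.TracerProvider; without the eBPF agent attached the returned spans are effectively no-ops. A minimal usage sketch (illustrative only, not part of this commit; it assumes the package is imported as go.opentelemetry.io/auto/sdk):

package main

import (
	"context"

	"go.opentelemetry.io/auto/sdk"
)

// Illustrative sketch: obtain a tracer from the auto SDK and record a span.
// Without an attached go.opentelemetry.io/auto Instrumentation the span is not
// sampled and nothing is recorded.
func main() {
	tracer := sdk.TracerProvider().Tracer("example.com/app")

	ctx, span := tracer.Start(context.Background(), "do-work")
	defer span.End()

	_ = ctx // pass ctx to downstream calls so child spans share this context
}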
@@ -0,0 +1,3 @@
exemptions:
  - check: artifacthub_badge
    reason: "Artifact Hub doesn't support Go packages"
@@ -0,0 +1,9 @@
ot
fo
te
collison
consequentially
ans
nam
valu
thirdparty
Some files were not shown because too many files have changed in this diff.