Update Controller Runtime Go module to v0.8.2

Barry Melbourne 2021-02-24 18:36:48 +00:00
parent 7759c754cb
commit 659bfa0daf
418 changed files with 19455 additions and 8592 deletions


@ -85,8 +85,7 @@ type LegacyNodeReconciler struct {
// +kubebuilder:rbac:groups=,resources=nodes,verbs=get;list;watch;patch
// Reconcile is the main reconciler function that observes node changes.
func (r *LegacyNodeReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
func (r *LegacyNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = r.log.WithValues("nodecontroller", req.NamespacedName)
node := &corev1.Node{}


@ -69,8 +69,7 @@ type NodeReconciler struct {
// +kubebuilder:rbac:groups=,resources=nodes,verbs=get;list;watch;patch
// Reconcile is the main reconciler function that observes node changes.
func (r *NodeReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
ctx := context.Background()
func (r *NodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
_ = r.log.WithValues("nodecontroller", req.NamespacedName)
node := &corev1.Node{}
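For orientation, not part of the diff itself: controller-runtime v0.7 moved the request context into the Reconcile signature, which is why both reconcilers above drop the context.Background() call and accept ctx as a parameter. A minimal sketch of the post-upgrade shape, using illustrative type and field names rather than the actual kops ones:

package controllers

import (
	"context"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// NodeWatcher is an illustrative reconciler, not the kops type.
type NodeWatcher struct {
	client client.Client
	log    logr.Logger
}

// The manager now supplies ctx, so the reconciler threads it through
// API calls instead of creating its own with context.Background().
func (r *NodeWatcher) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.log.WithValues("nodecontroller", req.NamespacedName)

	node := &corev1.Node{}
	if err := r.client.Get(ctx, req.NamespacedName, node); err != nil {
		// The node may already be gone; that is not an error for this controller.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	log.Info("observed node", "name", node.Name)
	return ctrl.Result{}, nil
}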

go.mod

@ -4,47 +4,29 @@ go 1.14
// Version kubernetes-1.20.0-beta.2 => tag v0.20.0
replace k8s.io/api => k8s.io/api v0.20.0
replace k8s.io/apimachinery => k8s.io/apimachinery v0.20.0
replace k8s.io/client-go => k8s.io/client-go v0.20.0
replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.0
replace k8s.io/kubectl => k8s.io/kubectl v0.20.0
replace k8s.io/apiserver => k8s.io/apiserver v0.20.0
replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.0
replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.0
replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.0
replace k8s.io/cri-api => k8s.io/cri-api v0.20.0
replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.0
replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.0
replace k8s.io/component-base => k8s.io/component-base v0.20.0
replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.0
replace k8s.io/metrics => k8s.io/metrics v0.20.0
replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.0
replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.0
replace k8s.io/kubelet => k8s.io/kubelet v0.20.0
replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.0
replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.0
replace k8s.io/kubernetes => k8s.io/kubernetes v0.20.0
replace (
k8s.io/api => k8s.io/api v0.20.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.0
k8s.io/apimachinery => k8s.io/apimachinery v0.20.0
k8s.io/apiserver => k8s.io/apiserver v0.20.0
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.0
k8s.io/client-go => k8s.io/client-go v0.20.0
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.0
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.0
k8s.io/component-base => k8s.io/component-base v0.20.0
k8s.io/cri-api => k8s.io/cri-api v0.20.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.0
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.0
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.0
k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.0
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.0
k8s.io/kubectl => k8s.io/kubectl v0.20.0
k8s.io/kubelet => k8s.io/kubelet v0.20.0
k8s.io/kubernetes => k8s.io/kubernetes v0.20.0
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.0
k8s.io/metrics => k8s.io/metrics v0.20.0
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.0
)
// This should match hack/go.mod
replace k8s.io/code-generator => k8s.io/code-generator v0.20.0
@ -70,19 +52,23 @@ require (
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
github.com/go-bindata/go-bindata/v3 v3.1.3
github.com/go-ini/ini v1.62.0
github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6
github.com/gogo/protobuf v1.3.1
github.com/go-logr/logr v0.4.0
github.com/gogo/protobuf v1.3.2
github.com/google/go-cmp v0.5.4
github.com/google/uuid v1.1.2
github.com/google/uuid v1.2.0
github.com/googleapis/gnostic v0.5.4 // indirect
github.com/gophercloud/gophercloud v0.15.0
github.com/hashicorp/hcl/v2 v2.7.0
github.com/hashicorp/vault/api v1.0.4
github.com/imdario/mergo v0.3.11 // indirect
github.com/jacksontj/memberlistmesh v0.0.0-20190905163944-93462b9d2bb7
github.com/jetstack/cert-manager v1.1.0
github.com/mitchellh/mapstructure v1.1.2
github.com/pelletier/go-toml v1.8.1
github.com/pkg/sftp v1.12.0
github.com/prometheus/client_golang v1.7.1
github.com/prometheus/client_golang v1.9.0
github.com/prometheus/common v0.17.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/sergi/go-diff v1.1.0
github.com/spf13/cobra v1.1.1
github.com/spf13/pflag v1.0.5
@ -91,26 +77,33 @@ require (
github.com/stretchr/testify v1.6.1
github.com/weaveworks/mesh v0.0.0-20170419100114-1f158d31de55
github.com/zclconf/go-cty v1.3.1
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9
golang.org/x/net v0.0.0-20210119194325-5f4716e94777
golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93
golang.org/x/sys v0.0.0-20210223212115-eede4237b368
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
google.golang.org/api v0.40.0
gopkg.in/gcfg.v1 v1.2.3
gopkg.in/inf.v0 v0.9.1
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
helm.sh/helm/v3 v3.4.2
honnef.co/go/tools v0.0.1-2020.1.6 // indirect
k8s.io/api v0.20.0
k8s.io/apimachinery v0.20.0
k8s.io/api v0.20.4
k8s.io/apiextensions-apiserver v0.20.4 // indirect
k8s.io/apimachinery v0.20.4
k8s.io/cli-runtime v0.20.0
k8s.io/client-go v0.20.0
k8s.io/client-go v1.5.1
k8s.io/cloud-provider-openstack v1.20.1
k8s.io/component-base v0.20.0
k8s.io/component-base v0.20.4
k8s.io/gengo v0.0.0-20201113003025-83324d819ded
k8s.io/klog/v2 v2.4.0
k8s.io/klog/v2 v2.5.0
k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6 // indirect
k8s.io/kubectl v0.19.4
k8s.io/legacy-cloud-providers v0.0.0
k8s.io/utils v0.0.0-20201110183641-67b214c5f920
sigs.k8s.io/controller-runtime v0.6.2
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
sigs.k8s.io/controller-runtime v0.8.2
sigs.k8s.io/structured-merge-diff/v4 v4.0.3 // indirect
sigs.k8s.io/yaml v1.2.0
)
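The controller-runtime v0.8 line also changes how a manager is started: SetupSignalHandler now returns a context.Context and Manager.Start consumes it in place of the old stop channel. That wiring is not part of this commit; the following is only a hedged sketch of a v0.8-style entry point, using a trivial no-op reconciler as a stand-in:

package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func main() {
	// zap.New() is the development logger bundled with controller-runtime.
	ctrl.SetLogger(zap.New())

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		os.Exit(1)
	}

	// Stand-in reconciler; reconcile.Func gained the ctx parameter in v0.7 as well.
	noop := reconcile.Func(func(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
		return ctrl.Result{}, nil
	})

	if err := ctrl.NewControllerManagedBy(mgr).For(&corev1.Node{}).Complete(noop); err != nil {
		os.Exit(1)
	}

	// Since v0.7, Start takes the context returned by SetupSignalHandler.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}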

go.sum

@ -133,6 +133,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/aliyun/alibaba-cloud-sdk-go v1.61.264 h1:5o7dIWOmKWEujFfzD3XV8EmpnCbqekpTqdEsBlfNp6c=
github.com/aliyun/alibaba-cloud-sdk-go v1.61.264/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@ -327,9 +328,15 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6 h1:ZPVluSmhtMIHlqUDMZu70FgMpRzbQfl4h9oKCAXOVDE=
github.com/go-logr/logr v0.2.1-0.20200730175230-ee2de8da5be6/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE=
github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4=
github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
@ -369,6 +376,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@ -445,12 +454,18 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.4 h1:ynbQIWjLw7iv6HAFdixb30U7Uvcmx+f4KlLJpmhkTK0=
github.com/googleapis/gnostic v0.5.4/go.mod h1:TRWw1s4gxBGjSe301Dai3c7wXJAZy57+/6tawkOvqHQ=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gophercloud/gophercloud v0.6.1-0.20191122030953-d8ac278c1c9d/go.mod h1:ozGNgr9KYOVATV5jsgHl/ceCDXGuguqOZAzoQ/2vcNM=
github.com/gophercloud/gophercloud v0.12.1-0.20200922152735-900f5ba8c05b/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
@ -546,6 +561,10 @@ github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
@ -565,6 +584,7 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@ -576,9 +596,12 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -674,6 +697,7 @@ github.com/munnerz/crd-schema-fuzz v1.0.0/go.mod h1:4z/rcm37JxUkSsExFcLL6ZIT1SgD
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
@ -703,6 +727,8 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@ -712,6 +738,8 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@ -771,6 +799,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@ -788,6 +818,9 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.17.0 h1:kDIZLI74SS+3tedSvEkykgBkD7txMxaJAPj8DtJUKYA=
github.com/prometheus/common v0.17.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@ -800,6 +833,8 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY=
@ -871,6 +906,7 @@ github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spotinst/spotinst-sdk-go v1.76.0 h1:tlFKrUAu7h3FgZM3Xy7436XR5lYo5Q/uePEy0AX5Ydw=
github.com/spotinst/spotinst-sdk-go v1.76.0/go.mod h1:sSRVZTSdUAPxeELD/urZkxcfU/DcxO1/UIdOxagqFBc=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@ -935,14 +971,23 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -964,8 +1009,8 @@ golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9 h1:sYNJzB4J8toYPQTM6pAkcmBRgw9SnQKP9oXCHfgy604=
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1049,6 +1094,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1059,6 +1106,8 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3 h1:BaN3BAqnopnKjvl+15DYP6LLrbBHfbfmlFYzmFj/Q9Q=
golang.org/x/oauth2 v0.0.0-20210113205817-d3ed898aa8a3/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93 h1:alLDrZkL34Y2bnGHfvC1CYBRBXCXgx8AC2vY4MRtYX4=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1125,22 +1174,27 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43 h1:SgQ6LNaYJU0JIuEHv9+s6EbhSCwYeAf5Yvj6lpYlqAE=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210223212115-eede4237b368 h1:fDE3p0qf2V1co1vfj3/o87Ps8Hq6QTGNxJ5Xe7xSp80=
golang.org/x/sys v0.0.0-20210223212115-eede4237b368/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1157,6 +1211,8 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1183,6 +1239,7 @@ golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -1211,6 +1268,7 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
@ -1219,6 +1277,7 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1228,6 +1287,8 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k=
gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@ -1294,6 +1355,7 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@ -1380,10 +1442,16 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
@ -1432,10 +1500,14 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.5.0 h1:8mOnjf1RmUPW6KRqQCfYSZq/K20Unmp3IhuZUhxl8KI=
k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-aggregator v0.20.0/go.mod h1:3Is/gzzWmhhG/rA3CpA1+eVye87lreBQDFGcAGT7gzo=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6 h1:37dOBBPjjBJGIfD+BlzVcjICVLX6fDDIwt5H1UnhXXM=
k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kubectl v0.20.0 h1:q6HH6jILYi2lkzFqBhs63M4bKLxYlM0HpFJ///MgARA=
k8s.io/kubectl v0.20.0/go.mod h1:8x5GzQkgikz7M2eFGGuu6yOfrenwnw5g4RXOUgbjR1M=
k8s.io/kubernetes v0.20.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
@ -1447,18 +1519,24 @@ k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-runtime v0.6.2 h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA=
sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E=
sigs.k8s.io/controller-runtime v0.8.2 h1:SBWmI0b3uzMIUD/BIXWNegrCeZmPJ503pOtwxY0LPHM=
sigs.k8s.io/controller-runtime v0.8.2/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU=
sigs.k8s.io/controller-tools v0.2.9-0.20200414181213-645d44dca7c0/go.mod h1:YKE/iHvcKITCljdnlqHYe+kAt7ZldvtAwUzQff0k1T0=
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3 h1:4oyYo8NREp49LBBhKxEqCulFjg26rawYKrnCmg+Sr6c=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -75,7 +75,14 @@ Contents: |
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997 --insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997
--insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
--v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20210122
name: etcd-manager
resources:
@ -133,7 +140,14 @@ Contents: |
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996 --insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996
--insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
--v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20210122
name: etcd-manager
resources:


@ -75,7 +75,14 @@ Contents: |
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997 --insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997
--insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
--v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20210122
name: etcd-manager
resources:
@ -139,7 +146,14 @@ Contents: |
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996 --insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996
--insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
--v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20210122
name: etcd-manager
resources:


@ -75,7 +75,14 @@ Contents: |
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997 --insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995 --v=3 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997
--insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
--v=3 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: ETCD_QUOTA_BACKEND_BYTES
value: "10737418240"
@ -136,7 +143,14 @@ Contents: |
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996 --insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994 --v=3 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996
--insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
--v=3 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: ETCD_QUOTA_BACKEND_BYTES
value: "10737418240"


@ -75,7 +75,14 @@ Contents: |
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997 --insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997
--insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
--v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: NO_PROXY
value: noproxy.example.com
@ -148,7 +155,14 @@ Contents: |
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager --backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true --dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996 --insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996
--insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
--v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: NO_PROXY
value: noproxy.example.com


@ -38,7 +38,8 @@ spec:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd >
/tmp/pipe 2>&1
env:
- name: ETCD_NAME
value: node0


@ -36,7 +36,8 @@ spec:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd >
/tmp/pipe 2>&1
env:
- name: ETCD_NAME
value: node0


@ -42,7 +42,8 @@ spec:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd > /tmp/pipe 2>&1
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd >
/tmp/pipe 2>&1
env:
- name: ETCD_NAME
value: node0


@ -2,7 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["logr.go"],
srcs = [
"discard.go",
"logr.go",
],
importmap = "k8s.io/kops/vendor/github.com/go-logr/logr",
importpath = "github.com/go-logr/logr",
visibility = ["//visibility:public"],


@ -41,6 +41,8 @@ There are implementations for the following logging libraries:
- **log** (the Go standard library logger):
[stdr](https://github.com/go-logr/stdr)
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
# FAQ

vendor/github.com/go-logr/logr/discard.go (new vendored file)

@ -0,0 +1,51 @@
/*
Copyright 2020 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr

// Discard returns a valid Logger that discards all messages logged to it.
// It can be used whenever the caller is not interested in the logs.
func Discard() Logger {
	return DiscardLogger{}
}

// DiscardLogger is a Logger that discards all messages.
type DiscardLogger struct{}

func (l DiscardLogger) Enabled() bool {
	return false
}

func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) {
}

func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) {
}

func (l DiscardLogger) V(level int) Logger {
	return l
}

func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger {
	return l
}

func (l DiscardLogger) WithName(name string) Logger {
	return l
}

// Verify that it actually implements the interface
var _ Logger = DiscardLogger{}
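Not part of the vendored file, but for context: Discard is convenient wherever a logr.Logger is required and output is unwanted, for example in unit tests. A minimal usage sketch:

package main

import "github.com/go-logr/logr"

func main() {
	// Every call is a no-op: Enabled() reports false and the chained
	// methods return the same discarding logger.
	log := logr.Discard().WithName("test").WithValues("suite", "nodes")
	log.Info("this message goes nowhere")
}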


@ -14,18 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package logr defines abstract interfaces for logging. Packages can depend on
// these interfaces and callers can implement logging in whatever way is
// appropriate.
//
// This design derives from Dave Cheney's blog:
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
//
// This is a BETA grade API. Until there is a significant 2nd implementation,
// I don't really know how it will change.
//
// The logging specifically makes it non-trivial to use format strings, to encourage
// attaching structured information instead of unstructured format strings.
// Package logr defines abstract interfaces for logging. Packages can depend on
// these interfaces and callers can implement logging in whatever way is
// appropriate.
//
// Usage
//
@ -40,17 +37,16 @@ limitations under the License.
// we want to log that we've made some decision.
//
// With the traditional log package, we might write:
// log.Printf(
// "decided to set field foo to value %q for object %s/%s",
// log.Printf("decided to set field foo to value %q for object %s/%s",
// targetValue, object.Namespace, object.Name)
//
// With logr's structured logging, we'd write:
// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers",
// // and the named value target-type=Foo, for extra context.
// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
// // elsewhere in the file, set up the logger to log with the prefix of
// // "reconcilers", and the named value target-type=Foo, for extra context.
// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo")
//
// // later on...
// log.Info("setting field foo on object", "value", targetValue, "object", object)
// // later on...
// log.Info("setting foo on object", "value", targetValue, "object", object)
//
// Depending on our logging implementation, we could then make logging decisions
// based on field values (like only logging such events for objects in a certain
@ -78,9 +74,9 @@ limitations under the License.
// Each log message from a Logger has four types of context:
// logger name, log verbosity, log message, and the named values.
//
// The Logger name constists of a series of name "segments" added by successive
// The Logger name consists of a series of name "segments" added by successive
// calls to WithName. These name segments will be joined in some way by the
// underlying implementation. It is strongly reccomended that name segements
// underlying implementation. It is strongly recommended that name segments
// contain simple identifiers (letters, digits, and hyphen), and do not contain
// characters that could muddle the log output or confuse the joining operation
// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc).
@ -91,8 +87,8 @@ limitations under the License.
// and log messages for users to filter on. It's illegal to pass a log level
// below zero.
//
// The log message consists of a constant message attached to the the log line.
// This should generally be a simple description of what's occuring, and should
// The log message consists of a constant message attached to the log line.
// This should generally be a simple description of what's occurring, and should
// never be a format string.
//
// Variable information can then be attached using named values (key/value
@ -115,24 +111,38 @@ limitations under the License.
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
//
// - `"caller"`: the calling information (file/line) of a particular log line.
// - `"error"`: the underlying error value in the `Error` method.
// - `"level"`: the log level.
// - `"logger"`: the name of the associated logger.
// - `"msg"`: the log message.
// - `"stacktrace"`: the stack trace associated with a particular log line or
// error (often from the `Error` message).
// - `"ts"`: the timestamp for a log line.
// * `"caller"`: the calling information (file/line) of a particular log line.
// * `"error"`: the underlying error value in the `Error` method.
// * `"level"`: the log level.
// * `"logger"`: the name of the associated logger.
// * `"msg"`: the log message.
// * `"stacktrace"`: the stack trace associated with a particular log line or
// error (often from the `Error` message).
// * `"ts"`: the timestamp for a log line.
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when neccessary (for example, in a pure-JSON output form, it
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
// // Underlier exposes access to the underlying logging implementation.
// // Since callers only have a logr.Logger, they have to know which
// // implementation is in use, so this interface is less of an abstraction
// // and more of way to test type conversion.
// type Underlier interface {
// GetUnderlying() <underlying-type>
// }
package logr
import (
"context"
)
// TODO: consider adding back in format strings if they're really needed
// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats
// TODO: consider other bits of glog functionality like Flush, OutputStats
// Logger represents the ability to log messages, both errors and not.
type Logger interface {
@ -171,12 +181,86 @@ type Logger interface {
// WithName adds a new element to the logger's name.
// Successive calls with WithName continue to append
// suffixes to the logger's name. It's strongly reccomended
// suffixes to the logger's name. It's strongly recommended
// that name segments contain only letters, digits, and hyphens
// (see the package documentation for more information).
WithName(name string) Logger
}
// InfoLogger provides compatibility with code that relies on the v0.1.0 interface
// Deprecated: use Logger instead. This will be removed in a future release.
// InfoLogger provides compatibility with code that relies on the v0.1.0
// interface.
//
// Deprecated: InfoLogger is an artifact of early versions of this API. New
// users should never use it and existing users should use Logger instead. This
// will be removed in a future release.
type InfoLogger = Logger
type contextKey struct{}
// FromContext returns a Logger constructed from ctx or nil if no
// logger details are found.
func FromContext(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return nil
}
// FromContextOrDiscard returns a Logger constructed from ctx or a Logger
// that discards all messages if no logger details are found.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new context derived from ctx that embeds the Logger.
func NewContext(ctx context.Context, l Logger) context.Context {
return context.WithValue(ctx, contextKey{}, l)
}
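For illustration, a minimal sketch (not part of the vendored file) of the context round trip these helpers enable; handleRequest is hypothetical, and Discard() stands in for a real logr implementation such as zapr.

package main

import (
	"context"

	"github.com/go-logr/logr"
)

// handleRequest retrieves whatever Logger was stored on the context with
// NewContext, or a discard logger if none is present.
func handleRequest(ctx context.Context) {
	log := logr.FromContextOrDiscard(ctx)
	log.Info("handling request")
}

func main() {
	base := logr.Discard() // stand-in for a real logr implementation
	ctx := logr.NewContext(context.Background(), base)
	handleRequest(ctx)
}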
// CallDepthLogger represents a Logger that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as file,
// function, or line) would otherwise log information about the intermediate
// helper functions.
//
// This is an optional interface and implementations are not required to
// support it.
type CallDepthLogger interface {
Logger
// WithCallDepth returns a Logger that will offset the call stack by the
// specified number of frames when logging call site information. If depth
// is 0 the attribution should be to the direct caller of this method. If
// depth is 1 the attribution should skip 1 call frame, and so on.
// Successive calls to this are additive.
WithCallDepth(depth int) Logger
}
// WithCallDepth returns a Logger that will offset the call stack by the
// specified number of frames when logging call site information, if possible.
// This is useful for users who have helper functions between the "real" call
// site and the actual calls to Logger methods. If depth is 0 the attribution
// should be to the direct caller of this function. If depth is 1 the
// attribution should skip 1 call frame, and so on. Successive calls to this
// are additive.
//
// If the underlying log implementation supports the CallDepthLogger interface,
// the WithCallDepth method will be called and the result returned. If the
// implementation does not support CallDepthLogger, the original Logger will be
// returned.
//
// Callers which care about whether this was supported or not should test for
// CallDepthLogger support themselves.
func WithCallDepth(logger Logger, depth int) Logger {
if decorator, ok := logger.(CallDepthLogger); ok {
return decorator.WithCallDepth(depth)
}
return logger
}
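For illustration, a minimal sketch (not part of the vendored file) of the helper-function case WithCallDepth addresses; logAndCount is hypothetical.

package main

import (
	"github.com/go-logr/logr"
)

// logAndCount does some bookkeeping and then logs on behalf of its caller,
// skipping its own stack frame so call-site attribution points at the caller
// (when the underlying implementation supports CallDepthLogger).
func logAndCount(log logr.Logger, msg string, count *int) {
	*count++
	logr.WithCallDepth(log, 1).Info(msg, "count", *count)
}

func main() {
	n := 0
	logAndCount(logr.Discard(), "ping", &n)
}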

View File

@ -318,7 +318,7 @@ func unescape(s string) (ch string, tail string, err error) {
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(i), s, nil
return string(rune(i)), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}

View File

@ -26,8 +26,8 @@ var (
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:])
h.Write(data)
h.Write(space[:]) //nolint:errcheck
h.Write(data) //nolint:errcheck
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)

View File

@ -9,7 +9,7 @@ import (
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {

View File

@ -35,6 +35,12 @@ const (
var rander = rand.Reader // random function
type invalidLengthError struct{ len int }
func (err invalidLengthError) Error() string {
return fmt.Sprintf("invalid UUID length: %d", err.len)
}
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
@ -68,7 +74,7 @@ func Parse(s string) (UUID, error) {
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
return uuid, invalidLengthError{len(s)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
@ -112,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) {
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
return uuid, invalidLengthError{len(b)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
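For illustration, a minimal sketch (not part of the vendored file) showing Parse accepting both the plain and urn-prefixed forms and surfacing the new length error for bad input; the literal values are arbitrary.

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	inputs := []string{
		"f47ac10b-58cc-0372-8567-0e02b2c3d479",
		"urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479",
		"too-short", // triggers invalidLengthError: "invalid UUID length: 9"
	}
	for _, s := range inputs {
		id, err := uuid.Parse(s)
		if err != nil {
			fmt.Println("parse failed:", err)
			continue
		}
		fmt.Println("parsed:", id.String())
	}
}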

View File

@ -14,6 +14,14 @@ func New() UUID {
return Must(NewRandom())
}
// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
// uuid.New().String()
func NewString() string {
return Must(NewRandom()).String()
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand

View File

@ -5,7 +5,7 @@ go_library(
srcs = [
"context.go",
"error.go",
"extension-handler.go",
"extensions.go",
"helpers.go",
"main.go",
"reader.go",
@ -17,6 +17,7 @@ go_library(
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes/any:go_default_library",
"//vendor/github.com/googleapis/gnostic/extensions:go_default_library",
"//vendor/gopkg.in/yaml.v2:go_default_library",
"//vendor/github.com/googleapis/gnostic/jsonschema:go_default_library",
"//vendor/gopkg.in/yaml.v3:go_default_library",
],
)

View File

@ -1,3 +1,4 @@
# Compiler support code
This directory contains compiler support code used by Gnostic and Gnostic extensions.
This directory contains compiler support code used by Gnostic and Gnostic
extensions.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,30 +14,36 @@
package compiler
import (
yaml "gopkg.in/yaml.v3"
)
// Context contains state of the compiler as it traverses a document.
type Context struct {
Parent *Context
Name string
Node *yaml.Node
ExtensionHandlers *[]ExtensionHandler
}
// NewContextWithExtensions returns a new object representing the compiler state
func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers}
}
// NewContext returns a new object representing the compiler state
func NewContext(name string, parent *Context) *Context {
func NewContext(name string, node *yaml.Node, parent *Context) *Context {
if parent != nil {
return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
}
return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
}
// Description returns a text description of the compiler state
func (context *Context) Description() string {
name := context.Name
if context.Parent != nil {
return context.Parent.Description() + "." + context.Name
name = context.Parent.Description() + "." + name
}
return context.Name
return name
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,6 +14,8 @@
package compiler
import "fmt"
// Error represents compiler errors and their location in the document.
type Error struct {
Context *Context
@ -25,12 +27,19 @@ func NewError(context *Context, message string) *Error {
return &Error{Context: context, Message: message}
}
func (err *Error) locationDescription() string {
if err.Context.Node != nil {
return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description())
}
return err.Context.Description()
}
// Error returns the string value of an Error.
func (err *Error) Error() string {
if err.Context == nil {
return "ERROR " + err.Message
return err.Message
}
return "ERROR " + err.Context.Description() + " " + err.Message
return err.locationDescription() + " " + err.Message
}
// ErrorGroup is a container for groups of Error values.

View File

@ -1,101 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"bytes"
"fmt"
"os/exec"
"strings"
"errors"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
ext_plugin "github.com/googleapis/gnostic/extensions"
yaml "gopkg.in/yaml.v2"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
type ExtensionHandler struct {
Name string
}
// HandleExtension calls a binary extension handler.
func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
handled := false
var errFromPlugin error
var outFromPlugin *any.Any
if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
if outFromPlugin == nil {
continue
} else {
handled = true
break
}
}
}
return handled, outFromPlugin, errFromPlugin
}
func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
if extensionHandlers.Name != "" {
binary, _ := yaml.Marshal(in)
request := &ext_plugin.ExtensionHandlerRequest{}
version := &ext_plugin.Version{}
version.Major = 0
version.Minor = 1
version.Patch = 0
request.CompilerVersion = version
request.Wrapper = &ext_plugin.Wrapper{}
request.Wrapper.Version = "v2"
request.Wrapper.Yaml = string(binary)
request.Wrapper.ExtensionName = extensionName
requestBytes, _ := proto.Marshal(request)
cmd := exec.Command(extensionHandlers.Name)
cmd.Stdin = bytes.NewReader(requestBytes)
output, err := cmd.Output()
if err != nil {
fmt.Printf("Error: %+v\n", err)
return nil, err
}
response := &ext_plugin.ExtensionHandlerResponse{}
err = proto.Unmarshal(output, response)
if err != nil {
fmt.Printf("Error: %+v\n", err)
fmt.Printf("%s\n", string(output))
return nil, err
}
if !response.Handled {
return nil, nil
}
if len(response.Error) != 0 {
message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
return nil, errors.New(message)
}
return response.Value, nil
}
return nil, nil
}

View File

@ -0,0 +1,85 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"bytes"
"fmt"
"os/exec"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
extensions "github.com/googleapis/gnostic/extensions"
yaml "gopkg.in/yaml.v3"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
type ExtensionHandler struct {
Name string
}
// CallExtension calls a binary extension handler.
func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
if context == nil || context.ExtensionHandlers == nil {
return false, nil, nil
}
handled = false
for _, handler := range *(context.ExtensionHandlers) {
response, err = handler.handle(in, extensionName)
if response == nil {
continue
} else {
handled = true
break
}
}
return handled, response, err
}
func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
if extensionHandlers.Name != "" {
yamlData, _ := yaml.Marshal(in)
request := &extensions.ExtensionHandlerRequest{
CompilerVersion: &extensions.Version{
Major: 0,
Minor: 1,
Patch: 0,
},
Wrapper: &extensions.Wrapper{
Version: "unknown", // TODO: set this to the type/version of spec being parsed.
Yaml: string(yamlData),
ExtensionName: extensionName,
},
}
requestBytes, _ := proto.Marshal(request)
cmd := exec.Command(extensionHandlers.Name)
cmd.Stdin = bytes.NewReader(requestBytes)
output, err := cmd.Output()
if err != nil {
return nil, err
}
response := &extensions.ExtensionHandlerResponse{}
err = proto.Unmarshal(output, response)
if err != nil || !response.Handled {
return nil, err
}
if len(response.Errors) != 0 {
return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ","))
}
return response.Value, nil
}
return nil, nil
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -16,56 +16,63 @@ package compiler
import (
"fmt"
"gopkg.in/yaml.v2"
"regexp"
"sort"
"strconv"
"github.com/googleapis/gnostic/jsonschema"
"gopkg.in/yaml.v3"
)
// compiler helper functions, usually called from generated code
// UnpackMap gets a yaml.MapSlice if possible.
func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
m, ok := in.(yaml.MapSlice)
if ok {
return m, true
// UnpackMap gets a *yaml.Node if possible.
func UnpackMap(in *yaml.Node) (*yaml.Node, bool) {
if in == nil {
return nil, false
}
// do we have an empty array?
a, ok := in.([]interface{})
if ok && len(a) == 0 {
// if so, return an empty map
return yaml.MapSlice{}, true
}
return nil, false
return in, true
}
// SortedKeysForMap returns the sorted keys of a yaml.MapSlice.
func SortedKeysForMap(m yaml.MapSlice) []string {
// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice.
// (The function now operates on a mapping *yaml.Node rather than a MapSlice.)
func SortedKeysForMap(m *yaml.Node) []string {
keys := make([]string, 0)
for _, item := range m {
keys = append(keys, item.Key.(string))
if m.Kind == yaml.MappingNode {
for i := 0; i < len(m.Content); i += 2 {
keys = append(keys, m.Content[i].Value)
}
}
sort.Strings(keys)
return keys
}
// MapHasKey returns true if a yaml.MapSlice contains a specified key.
func MapHasKey(m yaml.MapSlice, key string) bool {
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok && key == itemKey {
return true
// MapHasKey returns true if a yamlv2.MapSlice contains a specified key.
func MapHasKey(m *yaml.Node, key string) bool {
	// (The function now operates on a mapping *yaml.Node rather than a MapSlice.)
if m == nil {
return false
}
if m.Kind == yaml.MappingNode {
for i := 0; i < len(m.Content); i += 2 {
itemKey := m.Content[i].Value
if key == itemKey {
return true
}
}
}
return false
}
// MapValueForKey gets the value of a map value for a specified key.
func MapValueForKey(m yaml.MapSlice, key string) interface{} {
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok && key == itemKey {
return item.Value
func MapValueForKey(m *yaml.Node, key string) *yaml.Node {
if m == nil {
return nil
}
if m.Kind == yaml.MappingNode {
for i := 0; i < len(m.Content); i += 2 {
itemKey := m.Content[i].Value
if key == itemKey {
return m.Content[i+1]
}
}
}
return nil
@ -83,8 +90,118 @@ func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
return stringArray
}
// SequenceNodeForNode returns a node if it is a SequenceNode.
func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) {
if node.Kind != yaml.SequenceNode {
return nil, false
}
return node, true
}
// BoolForScalarNode returns the bool value of a node.
func BoolForScalarNode(node *yaml.Node) (bool, bool) {
if node == nil {
return false, false
}
if node.Kind == yaml.DocumentNode {
return BoolForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return false, false
}
if node.Tag != "!!bool" {
return false, false
}
v, err := strconv.ParseBool(node.Value)
if err != nil {
return false, false
}
return v, true
}
// IntForScalarNode returns the integer value of a node.
func IntForScalarNode(node *yaml.Node) (int64, bool) {
if node == nil {
return 0, false
}
if node.Kind == yaml.DocumentNode {
return IntForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return 0, false
}
if node.Tag != "!!int" {
return 0, false
}
v, err := strconv.ParseInt(node.Value, 10, 64)
if err != nil {
return 0, false
}
return v, true
}
// FloatForScalarNode returns the float value of a node.
func FloatForScalarNode(node *yaml.Node) (float64, bool) {
if node == nil {
return 0.0, false
}
if node.Kind == yaml.DocumentNode {
return FloatForScalarNode(node.Content[0])
}
if node.Kind != yaml.ScalarNode {
return 0.0, false
}
if (node.Tag != "!!int") && (node.Tag != "!!float") {
return 0.0, false
}
v, err := strconv.ParseFloat(node.Value, 64)
if err != nil {
return 0.0, false
}
return v, true
}
// StringForScalarNode returns the string value of a node.
func StringForScalarNode(node *yaml.Node) (string, bool) {
if node == nil {
return "", false
}
if node.Kind == yaml.DocumentNode {
return StringForScalarNode(node.Content[0])
}
switch node.Kind {
case yaml.ScalarNode:
switch node.Tag {
case "!!int":
return node.Value, true
case "!!str":
return node.Value, true
case "!!timestamp":
return node.Value, true
case "!!null":
return "", true
default:
return "", false
}
default:
return "", false
}
}
// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible.
func StringArrayForSequenceNode(node *yaml.Node) []string {
stringArray := make([]string, 0)
for _, item := range node.Content {
v, ok := StringForScalarNode(item)
if ok {
stringArray = append(stringArray, v)
}
}
return stringArray
}
// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string {
missingKeys := make([]string, 0)
for _, k := range requiredKeys {
if !MapHasKey(m, k) {
@ -95,64 +212,109 @@ func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
}
// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
invalidKeys := make([]string, 0)
for _, item := range m {
itemKey, ok := item.Key.(string)
if ok {
key := itemKey
found := false
// does the key match an allowed key?
for _, allowedKey := range allowedKeys {
if key == allowedKey {
if m == nil || m.Kind != yaml.MappingNode {
return invalidKeys
}
for i := 0; i < len(m.Content); i += 2 {
key := m.Content[i].Value
found := false
// does the key match an allowed key?
for _, allowedKey := range allowedKeys {
if key == allowedKey {
found = true
break
}
}
if !found {
// does the key match an allowed pattern?
for _, allowedPattern := range allowedPatterns {
if allowedPattern.MatchString(key) {
found = true
break
}
}
if !found {
// does the key match an allowed pattern?
for _, allowedPattern := range allowedPatterns {
if allowedPattern.MatchString(key) {
found = true
break
}
}
if !found {
invalidKeys = append(invalidKeys, key)
}
invalidKeys = append(invalidKeys, key)
}
}
}
return invalidKeys
}
// DescribeMap describes a map (for debugging purposes).
func DescribeMap(in interface{}, indent string) string {
description := ""
m, ok := in.(map[string]interface{})
if ok {
keys := make([]string, 0)
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
v := m[k]
description += fmt.Sprintf("%s%s:\n", indent, k)
description += DescribeMap(v, indent+" ")
}
return description
// NewNullNode creates a new Null node.
func NewNullNode() *yaml.Node {
node := &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!null",
}
a, ok := in.([]interface{})
if ok {
for i, v := range a {
description += fmt.Sprintf("%s%d:\n", indent, i)
description += DescribeMap(v, indent+" ")
}
return description
return node
}
// NewMappingNode creates a new Mapping node.
func NewMappingNode() *yaml.Node {
return &yaml.Node{
Kind: yaml.MappingNode,
Content: make([]*yaml.Node, 0),
}
}
// NewSequenceNode creates a new Sequence node.
func NewSequenceNode() *yaml.Node {
node := &yaml.Node{
Kind: yaml.SequenceNode,
Content: make([]*yaml.Node, 0),
}
return node
}
// NewScalarNodeForString creates a new node to hold a string.
func NewScalarNodeForString(s string) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!str",
Value: s,
}
}
// NewSequenceNodeForStringArray creates a new node to hold an array of strings.
func NewSequenceNodeForStringArray(strings []string) *yaml.Node {
node := &yaml.Node{
Kind: yaml.SequenceNode,
Content: make([]*yaml.Node, 0),
}
for _, s := range strings {
node.Content = append(node.Content, NewScalarNodeForString(s))
}
return node
}
// NewScalarNodeForBool creates a new node to hold a bool.
func NewScalarNodeForBool(b bool) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!bool",
Value: fmt.Sprintf("%t", b),
}
}
// NewScalarNodeForFloat creates a new node to hold a float.
func NewScalarNodeForFloat(f float64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!float",
Value: fmt.Sprintf("%g", f),
}
}
// NewScalarNodeForInt creates a new node to hold an integer.
func NewScalarNodeForInt(i int64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!int",
Value: fmt.Sprintf("%d", i),
}
description += fmt.Sprintf("%s%+v\n", indent, in)
return description
}
// PluralProperties returns the string "properties" pluralized.
@ -195,3 +357,40 @@ func StringValue(item interface{}) (value string, ok bool) {
}
return "", false
}
// Description returns a human-readable representation of an item.
func Description(item interface{}) string {
value, ok := item.(*yaml.Node)
if ok {
return jsonschema.Render(value)
}
return fmt.Sprintf("%+v", item)
}
// Display returns a description of a node for use in error messages.
func Display(node *yaml.Node) string {
switch node.Kind {
case yaml.ScalarNode:
switch node.Tag {
case "!!str":
return fmt.Sprintf("%s (string)", node.Value)
}
}
return fmt.Sprintf("%+v (%T)", node, node)
}
// Marshal creates a yaml version of a structure in our preferred style
func Marshal(in *yaml.Node) []byte {
clearStyle(in)
//bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}})
bytes, _ := yaml.Marshal(in)
return bytes
}
func clearStyle(node *yaml.Node) {
node.Style = 0
for _, c := range node.Content {
clearStyle(c)
}
}
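For illustration, a minimal sketch (not part of the vendored file) that builds a small mapping with the new node constructors and prints it with the package's Marshal helper; the keys and values are arbitrary.

package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// Build the mapping {enabled: true, replicas: 3} as a *yaml.Node.
	m := compiler.NewMappingNode()
	m.Content = append(m.Content,
		compiler.NewScalarNodeForString("enabled"),
		compiler.NewScalarNodeForBool(true),
		compiler.NewScalarNodeForString("replicas"),
		compiler.NewScalarNodeForInt(3),
	)
	// Marshal clears node styles and renders the node as YAML.
	fmt.Printf("%s", compiler.Marshal(m))
}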

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,7 +15,6 @@
package compiler
import (
"errors"
"fmt"
"io/ioutil"
"log"
@ -23,18 +22,30 @@ import (
"net/url"
"path/filepath"
"strings"
"sync"
yaml "gopkg.in/yaml.v2"
yaml "gopkg.in/yaml.v3"
)
var fileCache map[string][]byte
var infoCache map[string]interface{}
var count int64
var verboseReader = false
var fileCache map[string][]byte
var infoCache map[string]*yaml.Node
var fileCacheEnable = true
var infoCacheEnable = true
// These locks are used to synchronize accesses to the fileCache and infoCache
// maps (above). They are global state and can throw thread-related errors
// when modified from separate goroutines. The general strategy is to protect
// all public functions in this file with mutex Lock() calls. As a result, to
// avoid deadlock, these public functions should not call other public
// functions, so some public functions have private equivalents.
// In the future, we might consider replacing the maps with sync.Map and
// eliminating these mutexes.
var fileCacheMutex sync.Mutex
var infoCacheMutex sync.Mutex
func initializeFileCache() {
if fileCache == nil {
fileCache = make(map[string][]byte, 0)
@ -43,27 +54,42 @@ func initializeFileCache() {
func initializeInfoCache() {
if infoCache == nil {
infoCache = make(map[string]interface{}, 0)
infoCache = make(map[string]*yaml.Node, 0)
}
}
// EnableFileCache turns on file caching.
func EnableFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCacheEnable = true
}
// EnableInfoCache turns on parsed info caching.
func EnableInfoCache() {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCacheEnable = true
}
// DisableFileCache turns off file caching.
func DisableFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCacheEnable = false
}
// DisableInfoCache turns off parsed info caching.
func DisableInfoCache() {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCacheEnable = false
}
// RemoveFromFileCache removes an entry from the file cache.
func RemoveFromFileCache(fileurl string) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
if !fileCacheEnable {
return
}
@ -71,7 +97,10 @@ func RemoveFromFileCache(fileurl string) {
delete(fileCache, fileurl)
}
// RemoveFromInfoCache removes an entry from the info cache.
func RemoveFromInfoCache(filename string) {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
if !infoCacheEnable {
return
}
@ -79,21 +108,31 @@ func RemoveFromInfoCache(filename string) {
delete(infoCache, filename)
}
func GetInfoCache() map[string]interface{} {
// GetInfoCache returns the info cache map.
func GetInfoCache() map[string]*yaml.Node {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
if infoCache == nil {
initializeInfoCache()
}
return infoCache
}
// ClearFileCache clears the file cache.
func ClearFileCache() {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
fileCache = make(map[string][]byte, 0)
}
// ClearInfoCache clears the info cache.
func ClearInfoCache() {
infoCache = make(map[string]interface{})
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
infoCache = make(map[string]*yaml.Node)
}
// ClearCaches clears all caches.
func ClearCaches() {
ClearFileCache()
ClearInfoCache()
@ -101,6 +140,12 @@ func ClearCaches() {
// FetchFile gets a specified file from the local filesystem or a remote location.
func FetchFile(fileurl string) ([]byte, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
return fetchFile(fileurl)
}
func fetchFile(fileurl string) ([]byte, error) {
var bytes []byte
initializeFileCache()
if fileCacheEnable {
@ -121,7 +166,7 @@ func FetchFile(fileurl string) ([]byte, error) {
}
defer response.Body.Close()
if response.StatusCode != 200 {
return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status))
return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status)
}
bytes, err = ioutil.ReadAll(response.Body)
if fileCacheEnable && err == nil {
@ -132,11 +177,17 @@ func FetchFile(fileurl string) ([]byte, error) {
// ReadBytesForFile reads the bytes of a file.
func ReadBytesForFile(filename string) ([]byte, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
return readBytesForFile(filename)
}
func readBytesForFile(filename string) ([]byte, error) {
// is the filename a url?
fileurl, _ := url.Parse(filename)
if fileurl.Scheme != "" {
// yes, fetch it
bytes, err := FetchFile(filename)
bytes, err := fetchFile(filename)
if err != nil {
return nil, err
}
@ -150,8 +201,14 @@ func ReadBytesForFile(filename string) ([]byte, error) {
return bytes, nil
}
// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
// ReadInfoFromBytes unmarshals a file as a *yaml.Node.
func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
return readInfoFromBytes(filename, bytes)
}
func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
initializeInfoCache()
if infoCacheEnable {
cachedInfo, ok := infoCache[filename]
@ -165,19 +222,23 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
log.Printf("Reading info for file %s", filename)
}
}
var info yaml.MapSlice
var info yaml.Node
err := yaml.Unmarshal(bytes, &info)
if err != nil {
return nil, err
}
if infoCacheEnable && len(filename) > 0 {
infoCache[filename] = info
infoCache[filename] = &info
}
return info, nil
return &info, nil
}
// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref.
func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) {
fileCacheMutex.Lock()
defer fileCacheMutex.Unlock()
infoCacheMutex.Lock()
defer infoCacheMutex.Unlock()
initializeInfoCache()
if infoCacheEnable {
info, ok := infoCache[ref]
@ -191,7 +252,6 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
log.Printf("Reading info for ref %s#%s", basefile, ref)
}
}
count = count + 1
basedir, _ := filepath.Split(basefile)
parts := strings.Split(ref, "#")
var filename string
@ -204,24 +264,30 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
} else {
filename = basefile
}
bytes, err := ReadBytesForFile(filename)
bytes, err := readBytesForFile(filename)
if err != nil {
return nil, err
}
info, err := ReadInfoFromBytes(filename, bytes)
info, err := readInfoFromBytes(filename, bytes)
if info != nil && info.Kind == yaml.DocumentNode {
info = info.Content[0]
}
if err != nil {
log.Printf("File error: %v\n", err)
} else {
if info == nil {
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
}
if len(parts) > 1 {
path := strings.Split(parts[1], "/")
for i, key := range path {
if i > 0 {
m, ok := info.(yaml.MapSlice)
if ok {
m := info
if true {
found := false
for _, section := range m {
if section.Key == key {
info = section.Value
for i := 0; i < len(m.Content); i += 2 {
if m.Content[i].Value == key {
info = m.Content[i+1]
found = true
}
}

View File

@ -13,5 +13,7 @@ go_library(
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes/any:go_default_library",
"//vendor/google.golang.org/protobuf/reflect/protoreflect:go_default_library",
"//vendor/google.golang.org/protobuf/runtime/protoimpl:go_default_library",
],
)

View File

@ -1,5 +1,13 @@
# Extensions
This directory contains support code for building Gnostic extensions and associated examples.
**Extension Support is experimental.**
Extensions are used to compile vendor or specification extensions into protocol buffer structures.
This directory contains support code for building Gnostic extension handlers and
associated examples.
Extension handlers can be used to compile vendor or specification extensions
into protocol buffer structures.
Like plugins, extension handlers are built as separate executables. Extension
bodies are written to extension handlers as serialized
ExtensionHandlerRequests.
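For illustration, a minimal sketch (not part of the vendored files) of the handler side of this protocol: read a serialized ExtensionHandlerRequest from stdin, decide whether the named extension is handled, and write a serialized ExtensionHandlerResponse to stdout. The "x-sample" extension name is hypothetical, and a real handler would also populate Value from the YAML body.

package main

import (
	"io/ioutil"
	"os"

	"github.com/golang/protobuf/proto"
	extensions "github.com/googleapis/gnostic/extensions"
)

func main() {
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		os.Exit(1)
	}
	request := &extensions.ExtensionHandlerRequest{}
	if err := proto.Unmarshal(data, request); err != nil {
		os.Exit(1)
	}

	response := &extensions.ExtensionHandlerResponse{}
	if request.Wrapper != nil && request.Wrapper.ExtensionName == "x-sample" {
		response.Handled = true
		// A real handler would parse request.Wrapper.Yaml here and wrap the
		// typed result in response.Value (an any.Any).
	}

	// Per the protocol, exit with status zero and report problems through
	// response.Errors rather than the exit code.
	out, _ := proto.Marshal(response)
	os.Stdout.Write(out)
}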

View File

@ -1,148 +1,186 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.24.0
// protoc v3.12.0
// source: extensions/extension.proto
package openapiextension_v1
package gnostic_extension_v1
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// The version number of OpenAPI compiler.
// The version number of Gnostic.
type Version struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
// be empty for mainline stable releases.
Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
}
func (m *Version) Reset() { *m = Version{} }
func (m *Version) String() string { return proto.CompactTextString(m) }
func (*Version) ProtoMessage() {}
func (x *Version) Reset() {
*x = Version{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Version) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Version) ProtoMessage() {}
func (x *Version) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Version.ProtoReflect.Descriptor instead.
func (*Version) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{0}
return file_extensions_extension_proto_rawDescGZIP(), []int{0}
}
func (m *Version) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Version.Unmarshal(m, b)
}
func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Version.Marshal(b, m, deterministic)
}
func (m *Version) XXX_Merge(src proto.Message) {
xxx_messageInfo_Version.Merge(m, src)
}
func (m *Version) XXX_Size() int {
return xxx_messageInfo_Version.Size(m)
}
func (m *Version) XXX_DiscardUnknown() {
xxx_messageInfo_Version.DiscardUnknown(m)
}
var xxx_messageInfo_Version proto.InternalMessageInfo
func (m *Version) GetMajor() int32 {
if m != nil {
return m.Major
func (x *Version) GetMajor() int32 {
if x != nil {
return x.Major
}
return 0
}
func (m *Version) GetMinor() int32 {
if m != nil {
return m.Minor
func (x *Version) GetMinor() int32 {
if x != nil {
return x.Minor
}
return 0
}
func (m *Version) GetPatch() int32 {
if m != nil {
return m.Patch
func (x *Version) GetPatch() int32 {
if x != nil {
return x.Patch
}
return 0
}
func (m *Version) GetSuffix() string {
if m != nil {
return m.Suffix
func (x *Version) GetSuffix() string {
if x != nil {
return x.Suffix
}
return ""
}
// An encoded Request is written to the ExtensionHandler's stdin.
type ExtensionHandlerRequest struct {
// The OpenAPI descriptions that were explicitly listed on the command line.
// The specifications will appear in the order they are specified to gnostic.
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The extension to process.
Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"`
// The version number of openapi compiler.
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
// The version number of Gnostic.
CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
}
func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} }
func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
func (*ExtensionHandlerRequest) ProtoMessage() {}
func (x *ExtensionHandlerRequest) Reset() {
*x = ExtensionHandlerRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExtensionHandlerRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExtensionHandlerRequest) ProtoMessage() {}
func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead.
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{1}
return file_extensions_extension_proto_rawDescGZIP(), []int{1}
}
func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b)
}
func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic)
}
func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src)
}
func (m *ExtensionHandlerRequest) XXX_Size() int {
return xxx_messageInfo_ExtensionHandlerRequest.Size(m)
}
func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo
func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
if m != nil {
return m.Wrapper
func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper {
if x != nil {
return x.Wrapper
}
return nil
}
func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
if m != nil {
return m.CompilerVersion
func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version {
if x != nil {
return x.CompilerVersion
}
return nil
}
// The extensions writes an encoded ExtensionHandlerResponse to stdout.
type ExtensionHandlerResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// true if the extension is handled by the extension handler; false otherwise
Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"`
// Error message. If non-empty, the extension handling failed.
// Error message(s). If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero
// even if it reports an error in this way.
//
@ -151,150 +189,277 @@ type ExtensionHandlerResponse struct {
// itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero
// status code.
Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"`
Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"`
// text output
Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} }
func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
func (*ExtensionHandlerResponse) ProtoMessage() {}
func (x *ExtensionHandlerResponse) Reset() {
*x = ExtensionHandlerResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ExtensionHandlerResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ExtensionHandlerResponse) ProtoMessage() {}
func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead.
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{2}
return file_extensions_extension_proto_rawDescGZIP(), []int{2}
}
func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b)
}
func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic)
}
func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src)
}
func (m *ExtensionHandlerResponse) XXX_Size() int {
return xxx_messageInfo_ExtensionHandlerResponse.Size(m)
}
func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo
func (m *ExtensionHandlerResponse) GetHandled() bool {
if m != nil {
return m.Handled
func (x *ExtensionHandlerResponse) GetHandled() bool {
if x != nil {
return x.Handled
}
return false
}
func (m *ExtensionHandlerResponse) GetError() []string {
if m != nil {
return m.Error
func (x *ExtensionHandlerResponse) GetErrors() []string {
if x != nil {
return x.Errors
}
return nil
}
func (m *ExtensionHandlerResponse) GetValue() *any.Any {
if m != nil {
return m.Value
func (x *ExtensionHandlerResponse) GetValue() *any.Any {
if x != nil {
return x.Value
}
return nil
}
type Wrapper struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// version of the OpenAPI specification in which this extension was written.
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
// Name of the extension
// Name of the extension.
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"`
// Must be a valid yaml for the proto
Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
// YAML-formatted extension value.
Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
}
func (m *Wrapper) Reset() { *m = Wrapper{} }
func (m *Wrapper) String() string { return proto.CompactTextString(m) }
func (*Wrapper) ProtoMessage() {}
func (x *Wrapper) Reset() {
*x = Wrapper{}
if protoimpl.UnsafeEnabled {
mi := &file_extensions_extension_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Wrapper) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Wrapper) ProtoMessage() {}
func (x *Wrapper) ProtoReflect() protoreflect.Message {
mi := &file_extensions_extension_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead.
func (*Wrapper) Descriptor() ([]byte, []int) {
return fileDescriptor_661e47e790f76671, []int{3}
return file_extensions_extension_proto_rawDescGZIP(), []int{3}
}
func (m *Wrapper) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Wrapper.Unmarshal(m, b)
}
func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic)
}
func (m *Wrapper) XXX_Merge(src proto.Message) {
xxx_messageInfo_Wrapper.Merge(m, src)
}
func (m *Wrapper) XXX_Size() int {
return xxx_messageInfo_Wrapper.Size(m)
}
func (m *Wrapper) XXX_DiscardUnknown() {
xxx_messageInfo_Wrapper.DiscardUnknown(m)
}
var xxx_messageInfo_Wrapper proto.InternalMessageInfo
func (m *Wrapper) GetVersion() string {
if m != nil {
return m.Version
func (x *Wrapper) GetVersion() string {
if x != nil {
return x.Version
}
return ""
}
func (m *Wrapper) GetExtensionName() string {
if m != nil {
return m.ExtensionName
func (x *Wrapper) GetExtensionName() string {
if x != nil {
return x.ExtensionName
}
return ""
}
func (m *Wrapper) GetYaml() string {
if m != nil {
return m.Yaml
func (x *Wrapper) GetYaml() string {
if x != nil {
return x.Yaml
}
return ""
}
func init() {
proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest")
proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse")
proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
var File_extensions_extension_proto protoreflect.FileDescriptor
var file_extensions_extension_proto_rawDesc = []byte{
0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74,
0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e,
0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e,
0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a,
0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f,
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14,
0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d,
0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75,
0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66,
0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37,
0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07,
0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65,
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61,
0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a,
0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12,
0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57,
0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4b, 0x0a, 0x0e, 0x6f,
0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47,
0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50,
0x01, 0x5a, 0x1f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x67, 0x6e,
0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f,
0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) }
var (
file_extensions_extension_proto_rawDescOnce sync.Once
file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc
)
var fileDescriptor_661e47e790f76671 = []byte{
// 362 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40,
0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50,
0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7,
0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85,
0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f,
0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71,
0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e,
0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d,
0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef,
0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35,
0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14,
0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39,
0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08,
0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81,
0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3,
0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8,
0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35,
0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0,
0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18,
0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09,
0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d,
0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00,
0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00,
}
func file_extensions_extension_proto_rawDescGZIP() []byte {
file_extensions_extension_proto_rawDescOnce.Do(func() {
file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData)
})
return file_extensions_extension_proto_rawDescData
}
var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_extensions_extension_proto_goTypes = []interface{}{
(*Version)(nil), // 0: gnostic.extension.v1.Version
(*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
(*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper
(*any.Any)(nil), // 4: google.protobuf.Any
}
var file_extensions_extension_proto_depIdxs = []int32{
3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper
0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version
4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_extensions_extension_proto_init() }
func file_extensions_extension_proto_init() {
if File_extensions_extension_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Version); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionHandlerRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionHandlerResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Wrapper); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_extensions_extension_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_extensions_extension_proto_goTypes,
DependencyIndexes: file_extensions_extension_proto_depIdxs,
MessageInfos: file_extensions_extension_proto_msgTypes,
}.Build()
File_extensions_extension_proto = out.File
file_extensions_extension_proto_rawDesc = nil
file_extensions_extension_proto_goTypes = nil
file_extensions_extension_proto_depIdxs = nil
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -14,8 +14,9 @@
syntax = "proto3";
package gnostic.extension.v1;
import "google/protobuf/any.proto";
package openapiextension.v1;
// This option lets the proto compiler generate Java code inside the package
// name (see below) instead of inside an outer class. It creates a simpler
@ -26,7 +27,7 @@ option java_multiple_files = true;
// The Java outer classname should be the filename in UpperCamelCase. This
// class is only used to hold proto descriptor, so developers don't need to
// work with it directly.
option java_outer_classname = "OpenAPIExtensionV1";
option java_outer_classname = "GnosticExtension";
// The Java package name must be proto package name with proper prefix.
option java_package = "org.gnostic.v1";
@ -37,9 +38,12 @@ option java_package = "org.gnostic.v1";
// hopefully unique enough to not conflict with things that may come along in
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
//
option objc_class_prefix = "OAE"; // "OpenAPI Extension"
option objc_class_prefix = "GNX"; // "Gnostic Extension"
// The version number of OpenAPI compiler.
// The Go package name.
option go_package = "extensions;gnostic_extension_v1";
// The version number of Gnostic.
message Version {
int32 major = 1;
int32 minor = 2;
@ -52,12 +56,11 @@ message Version {
// An encoded Request is written to the ExtensionHandler's stdin.
message ExtensionHandlerRequest {
// The OpenAPI descriptions that were explicitly listed on the command line.
// The specifications will appear in the order they are specified to gnostic.
// The extension to process.
Wrapper wrapper = 1;
// The version number of openapi compiler.
Version compiler_version = 3;
// The version number of Gnostic.
Version compiler_version = 2;
}
// The extension handler writes an encoded ExtensionHandlerResponse to stdout.
@ -66,7 +69,7 @@ message ExtensionHandlerResponse {
// true if the extension is handled by the extension handler; false otherwise
bool handled = 1;
// Error message. If non-empty, the extension handling failed.
// Error message(s). If non-empty, the extension handling failed.
// The extension handler process should exit with status code zero
// even if it reports an error in this way.
//
@ -75,7 +78,7 @@ message ExtensionHandlerResponse {
// itself -- such as the input Document being unparseable -- should be
// reported by writing a message to stderr and exiting with a non-zero
// status code.
repeated string error = 2;
repeated string errors = 2;
// text output
google.protobuf.Any value = 3;
@ -85,9 +88,9 @@ message Wrapper {
// version of the OpenAPI specification in which this extension was written.
string version = 1;
// Name of the extension
// Name of the extension.
string extension_name = 2;
// Must be a valid yaml for the proto
// YAML-formatted extension value.
string yaml = 3;
}

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,71 +12,53 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package openapiextension_v1
package gnostic_extension_v1
import (
"fmt"
"io/ioutil"
"log"
"os"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
)
type documentHandler func(version string, extensionName string, document string)
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
func forInputYamlFromOpenapic(handler documentHandler) {
// Main implements the main program of an extension handler.
func Main(handler extensionHandler) {
// unpack the request
data, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Println("File error:", err.Error())
log.Println("File error:", err.Error())
os.Exit(1)
}
if len(data) == 0 {
fmt.Println("No input data.")
log.Println("No input data.")
os.Exit(1)
}
request := &ExtensionHandlerRequest{}
err = proto.Unmarshal(data, request)
if err != nil {
fmt.Println("Input error:", err.Error())
log.Println("Input error:", err.Error())
os.Exit(1)
}
handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
}
// ProcessExtension calls the handler for a specified extension.
func ProcessExtension(handleExtension extensionHandler) {
response := &ExtensionHandlerResponse{}
forInputYamlFromOpenapic(
func(version string, extensionName string, yamlInput string) {
var newObject proto.Message
var err error
handled, newObject, err := handleExtension(extensionName, yamlInput)
if !handled {
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
// If we reach here, then the extension is handled
response.Handled = true
if err != nil {
response.Error = append(response.Error, err.Error())
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
response.Value, err = ptypes.MarshalAny(newObject)
if err != nil {
response.Error = append(response.Error, err.Error())
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
os.Exit(0)
}
})
// call the handler
handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml)
// respond with the output of the handler
response := &ExtensionHandlerResponse{
Handled: false, // default assumption
Errors: make([]string, 0),
}
if err != nil {
response.Errors = append(response.Errors, err.Error())
} else if handled {
response.Handled = true
response.Value, err = ptypes.MarshalAny(output)
if err != nil {
response.Errors = append(response.Errors, err.Error())
}
}
responseBytes, _ := proto.Marshal(response)
os.Stdout.Write(responseBytes)
}
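
For reference, a minimal sketch of a plugin built on the new Main entry point. The import path, the "x-sample" extension name, and the reuse of Version as the output message are illustrative assumptions, not part of this commit:

```go
package main

import (
	"github.com/golang/protobuf/proto"

	// Import path assumed from the vendored layout; adjust for your module.
	gnostic_extension_v1 "github.com/googleapis/gnostic/extensions"
)

func main() {
	// Main reads an ExtensionHandlerRequest from stdin, calls the handler once,
	// and writes an ExtensionHandlerResponse to stdout.
	gnostic_extension_v1.Main(func(extensionName string, yamlInput string) (bool, proto.Message, error) {
		// Only claim extensions this plugin understands ("x-sample" is a made-up name).
		if extensionName != "x-sample" {
			return false, nil, nil
		}
		// A real handler would parse yamlInput into a plugin-specific proto message;
		// Version is used here only because it is a message defined in this package.
		return true, &gnostic_extension_v1.Version{Major: 1}, nil
	})
}
```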

View File

@ -0,0 +1,17 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"base.go",
"display.go",
"models.go",
"operations.go",
"reader.go",
"writer.go",
],
importmap = "k8s.io/kops/vendor/github.com/googleapis/gnostic/jsonschema",
importpath = "github.com/googleapis/gnostic/jsonschema",
visibility = ["//visibility:public"],
deps = ["//vendor/gopkg.in/yaml.v3:go_default_library"],
)

View File

@ -0,0 +1,4 @@
# jsonschema
This directory contains code for reading, writing, and manipulating JSON
schemas.
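
A minimal usage sketch, assuming the import path declared in the BUILD file above; the schema file name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/jsonschema"
)

func main() {
	// Load a schema from disk ("schema.json" is a placeholder path).
	schema, err := jsonschema.NewSchemaFromFile("schema.json")
	if err != nil {
		log.Fatal(err)
	}
	// String() renders an indented, human-readable description of the schema.
	fmt.Println(schema.String())
}
```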

View File

@ -0,0 +1,84 @@
// THIS FILE IS AUTOMATICALLY GENERATED.
package jsonschema
import (
"encoding/base64"
)
func baseSchemaBytes() ([]byte, error) {
return base64.StdEncoding.DecodeString(
`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi
JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl
c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK
ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg
ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg
ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp
bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp
dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl
ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK
ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v
bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg
ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki
LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p
bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s
CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog
ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg
ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog
ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg
ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog
ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6
IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog
ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1
ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl
ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw
ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg
ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg
ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg
ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg
IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0
aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg
ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg
ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg
ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK
ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi
ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP
ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy
ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg
ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv
ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi
OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl
SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs
dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k
ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk
cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl
cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh
ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg
ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg
ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk
ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk
ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6
IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi
b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9
LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl
cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv
bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog
ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq
ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg
ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg
ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg
ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg
ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu
aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh
bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl
cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs
CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs
ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg
ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg
ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy
YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}

View File

@ -0,0 +1,229 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"strings"
)
//
// DISPLAY
// The following methods display Schemas.
//
// Description returns a string representation of a string or string array.
func (s *StringOrStringArray) Description() string {
if s.String != nil {
return *s.String
}
if s.StringArray != nil {
return strings.Join(*s.StringArray, ", ")
}
return ""
}
// Returns a string representation of a Schema.
func (schema *Schema) String() string {
return schema.describeSchema("")
}
// Helper: Returns a string representation of a Schema indented by a specified string.
func (schema *Schema) describeSchema(indent string) string {
result := ""
if schema.Schema != nil {
result += indent + "$schema: " + *(schema.Schema) + "\n"
}
if schema.ID != nil {
result += indent + "id: " + *(schema.ID) + "\n"
}
if schema.MultipleOf != nil {
result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf))
}
if schema.Maximum != nil {
result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum))
}
if schema.ExclusiveMaximum != nil {
result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum))
}
if schema.Minimum != nil {
result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum))
}
if schema.ExclusiveMinimum != nil {
result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum))
}
if schema.MaxLength != nil {
result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength))
}
if schema.MinLength != nil {
result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength))
}
if schema.Pattern != nil {
result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern))
}
if schema.AdditionalItems != nil {
s := schema.AdditionalItems.Schema
if s != nil {
result += indent + "additionalItems:\n"
result += s.describeSchema(indent + " ")
} else {
b := *(schema.AdditionalItems.Boolean)
result += indent + fmt.Sprintf("additionalItems: %+v\n", b)
}
}
if schema.Items != nil {
result += indent + "items:\n"
items := schema.Items
if items.SchemaArray != nil {
for i, s := range *(items.SchemaArray) {
result += indent + " " + fmt.Sprintf("%d", i) + ":\n"
result += s.describeSchema(indent + " " + " ")
}
} else if items.Schema != nil {
result += items.Schema.describeSchema(indent + " " + " ")
}
}
if schema.MaxItems != nil {
result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems))
}
if schema.MinItems != nil {
result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems))
}
if schema.UniqueItems != nil {
result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems))
}
if schema.MaxProperties != nil {
result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties))
}
if schema.MinProperties != nil {
result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties))
}
if schema.Required != nil {
result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required))
}
if schema.AdditionalProperties != nil {
s := schema.AdditionalProperties.Schema
if s != nil {
result += indent + "additionalProperties:\n"
result += s.describeSchema(indent + " ")
} else {
b := *(schema.AdditionalProperties.Boolean)
result += indent + fmt.Sprintf("additionalProperties: %+v\n", b)
}
}
if schema.Properties != nil {
result += indent + "properties:\n"
for _, pair := range *(schema.Properties) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.PatternProperties != nil {
result += indent + "patternProperties:\n"
for _, pair := range *(schema.PatternProperties) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.Dependencies != nil {
result += indent + "dependencies:\n"
for _, pair := range *(schema.Dependencies) {
name := pair.Name
schemaOrStringArray := pair.Value
s := schemaOrStringArray.Schema
if s != nil {
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
} else {
a := schemaOrStringArray.StringArray
if a != nil {
result += indent + " " + name + ":\n"
for _, s2 := range *a {
result += indent + " " + " " + s2 + "\n"
}
}
}
}
}
if schema.Enumeration != nil {
result += indent + "enumeration:\n"
for _, value := range *(schema.Enumeration) {
if value.String != nil {
result += indent + " " + fmt.Sprintf("%+v\n", *value.String)
} else {
result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool)
}
}
}
if schema.Type != nil {
result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description())
}
if schema.AllOf != nil {
result += indent + "allOf:\n"
for _, s := range *(schema.AllOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.AnyOf != nil {
result += indent + "anyOf:\n"
for _, s := range *(schema.AnyOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.OneOf != nil {
result += indent + "oneOf:\n"
for _, s := range *(schema.OneOf) {
result += s.describeSchema(indent + " ")
result += indent + "-\n"
}
}
if schema.Not != nil {
result += indent + "not:\n"
result += schema.Not.describeSchema(indent + " ")
}
if schema.Definitions != nil {
result += indent + "definitions:\n"
for _, pair := range *(schema.Definitions) {
name := pair.Name
s := pair.Value
result += indent + " " + name + ":\n"
result += s.describeSchema(indent + " " + " ")
}
}
if schema.Title != nil {
result += indent + "title: " + *(schema.Title) + "\n"
}
if schema.Description != nil {
result += indent + "description: " + *(schema.Description) + "\n"
}
if schema.Default != nil {
result += indent + "default:\n"
result += indent + fmt.Sprintf(" %+v\n", *(schema.Default))
}
if schema.Format != nil {
result += indent + "format: " + *(schema.Format) + "\n"
}
if schema.Ref != nil {
result += indent + "$ref: " + *(schema.Ref) + "\n"
}
return result
}

View File

@ -0,0 +1,228 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package jsonschema supports the reading, writing, and manipulation
// of JSON Schemas.
package jsonschema
import "gopkg.in/yaml.v3"
// The Schema struct models a JSON Schema and, because schemas are
// defined hierarchically, contains many references to itself.
// All fields are pointers and are nil if the associated values
// are not specified.
type Schema struct {
Schema *string // $schema
ID *string // id keyword used for $ref resolution scope
Ref *string // $ref, i.e. JSON Pointers
// http://json-schema.org/latest/json-schema-validation.html
// 5.1. Validation keywords for numeric instances (number and integer)
MultipleOf *SchemaNumber
Maximum *SchemaNumber
ExclusiveMaximum *bool
Minimum *SchemaNumber
ExclusiveMinimum *bool
// 5.2. Validation keywords for strings
MaxLength *int64
MinLength *int64
Pattern *string
// 5.3. Validation keywords for arrays
AdditionalItems *SchemaOrBoolean
Items *SchemaOrSchemaArray
MaxItems *int64
MinItems *int64
UniqueItems *bool
// 5.4. Validation keywords for objects
MaxProperties *int64
MinProperties *int64
Required *[]string
AdditionalProperties *SchemaOrBoolean
Properties *[]*NamedSchema
PatternProperties *[]*NamedSchema
Dependencies *[]*NamedSchemaOrStringArray
// 5.5. Validation keywords for any instance type
Enumeration *[]SchemaEnumValue
Type *StringOrStringArray
AllOf *[]*Schema
AnyOf *[]*Schema
OneOf *[]*Schema
Not *Schema
Definitions *[]*NamedSchema
// 6. Metadata keywords
Title *string
Description *string
Default *yaml.Node
// 7. Semantic validation with "format"
Format *string
}
// These helper structs represent "combination" types that generally can
// have values of one type or another. All are used to represent parts
// of Schemas.
// SchemaNumber represents a value that can be either an Integer or a Float.
type SchemaNumber struct {
Integer *int64
Float *float64
}
// NewSchemaNumberWithInteger creates and returns a new object
func NewSchemaNumberWithInteger(i int64) *SchemaNumber {
result := &SchemaNumber{}
result.Integer = &i
return result
}
// NewSchemaNumberWithFloat creates and returns a new object
func NewSchemaNumberWithFloat(f float64) *SchemaNumber {
result := &SchemaNumber{}
result.Float = &f
return result
}
// SchemaOrBoolean represents a value that can be either a Schema or a Boolean.
type SchemaOrBoolean struct {
Schema *Schema
Boolean *bool
}
// NewSchemaOrBooleanWithSchema creates and returns a new object
func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean {
result := &SchemaOrBoolean{}
result.Schema = s
return result
}
// NewSchemaOrBooleanWithBoolean creates and returns a new object
func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean {
result := &SchemaOrBoolean{}
result.Boolean = &b
return result
}
// StringOrStringArray represents a value that can be either
// a String or an Array of Strings.
type StringOrStringArray struct {
String *string
StringArray *[]string
}
// NewStringOrStringArrayWithString creates and returns a new object
func NewStringOrStringArrayWithString(s string) *StringOrStringArray {
result := &StringOrStringArray{}
result.String = &s
return result
}
// NewStringOrStringArrayWithStringArray creates and returns a new object
func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray {
result := &StringOrStringArray{}
result.StringArray = &a
return result
}
// SchemaOrStringArray represents a value that can be either
// a Schema or an Array of Strings.
type SchemaOrStringArray struct {
Schema *Schema
StringArray *[]string
}
// SchemaOrSchemaArray represents a value that can be either
// a Schema or an Array of Schemas.
type SchemaOrSchemaArray struct {
Schema *Schema
SchemaArray *[]*Schema
}
// NewSchemaOrSchemaArrayWithSchema creates and returns a new object
func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray {
result := &SchemaOrSchemaArray{}
result.Schema = s
return result
}
// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object
func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray {
result := &SchemaOrSchemaArray{}
result.SchemaArray = &a
return result
}
// SchemaEnumValue represents a value that can be part of an
// enumeration in a Schema.
type SchemaEnumValue struct {
String *string
Bool *bool
}
// NamedSchema is a name-value pair that is used to emulate maps
// with ordered keys.
type NamedSchema struct {
Name string
Value *Schema
}
// NewNamedSchema creates and returns a new object
func NewNamedSchema(name string, value *Schema) *NamedSchema {
return &NamedSchema{Name: name, Value: value}
}
// NamedSchemaOrStringArray is a name-value pair that is used
// to emulate maps with ordered keys.
type NamedSchemaOrStringArray struct {
Name string
Value *SchemaOrStringArray
}
// Access named subschemas by name
func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema {
if array == nil {
return nil
}
for _, pair := range *array {
if pair.Name == name {
return pair.Value
}
}
return nil
}
// PropertyWithName returns the selected element.
func (s *Schema) PropertyWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.Properties, name)
}
// PatternPropertyWithName returns the selected element.
func (s *Schema) PatternPropertyWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.PatternProperties, name)
}
// DefinitionWithName returns the selected element.
func (s *Schema) DefinitionWithName(name string) *Schema {
return namedSchemaArrayElementWithName(s.Definitions, name)
}
// AddProperty adds a named property.
func (s *Schema) AddProperty(name string, property *Schema) {
*s.Properties = append(*s.Properties, NewNamedSchema(name, property))
}

View File

@ -0,0 +1,394 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"log"
"strings"
)
//
// OPERATIONS
// The following methods perform operations on Schemas.
//
// IsEmpty returns true if no members of the Schema are specified.
func (schema *Schema) IsEmpty() bool {
return (schema.Schema == nil) &&
(schema.ID == nil) &&
(schema.MultipleOf == nil) &&
(schema.Maximum == nil) &&
(schema.ExclusiveMaximum == nil) &&
(schema.Minimum == nil) &&
(schema.ExclusiveMinimum == nil) &&
(schema.MaxLength == nil) &&
(schema.MinLength == nil) &&
(schema.Pattern == nil) &&
(schema.AdditionalItems == nil) &&
(schema.Items == nil) &&
(schema.MaxItems == nil) &&
(schema.MinItems == nil) &&
(schema.UniqueItems == nil) &&
(schema.MaxProperties == nil) &&
(schema.MinProperties == nil) &&
(schema.Required == nil) &&
(schema.AdditionalProperties == nil) &&
(schema.Properties == nil) &&
(schema.PatternProperties == nil) &&
(schema.Dependencies == nil) &&
(schema.Enumeration == nil) &&
(schema.Type == nil) &&
(schema.AllOf == nil) &&
(schema.AnyOf == nil) &&
(schema.OneOf == nil) &&
(schema.Not == nil) &&
(schema.Definitions == nil) &&
(schema.Title == nil) &&
(schema.Description == nil) &&
(schema.Default == nil) &&
(schema.Format == nil) &&
(schema.Ref == nil)
}
// IsEqual returns true if two schemas are equal.
func (schema *Schema) IsEqual(schema2 *Schema) bool {
return schema.String() == schema2.String()
}
// SchemaOperation represents a function that can be applied to a Schema.
type SchemaOperation func(schema *Schema, context string)
// Applies a specified function to a Schema and all of the Schemas that it contains.
func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) {
if schema.AdditionalItems != nil {
s := schema.AdditionalItems.Schema
if s != nil {
s.applyToSchemas(operation, "AdditionalItems")
}
}
if schema.Items != nil {
if schema.Items.SchemaArray != nil {
for _, s := range *(schema.Items.SchemaArray) {
s.applyToSchemas(operation, "Items.SchemaArray")
}
} else if schema.Items.Schema != nil {
schema.Items.Schema.applyToSchemas(operation, "Items.Schema")
}
}
if schema.AdditionalProperties != nil {
s := schema.AdditionalProperties.Schema
if s != nil {
s.applyToSchemas(operation, "AdditionalProperties")
}
}
if schema.Properties != nil {
for _, pair := range *(schema.Properties) {
s := pair.Value
s.applyToSchemas(operation, "Properties")
}
}
if schema.PatternProperties != nil {
for _, pair := range *(schema.PatternProperties) {
s := pair.Value
s.applyToSchemas(operation, "PatternProperties")
}
}
if schema.Dependencies != nil {
for _, pair := range *(schema.Dependencies) {
schemaOrStringArray := pair.Value
s := schemaOrStringArray.Schema
if s != nil {
s.applyToSchemas(operation, "Dependencies")
}
}
}
if schema.AllOf != nil {
for _, s := range *(schema.AllOf) {
s.applyToSchemas(operation, "AllOf")
}
}
if schema.AnyOf != nil {
for _, s := range *(schema.AnyOf) {
s.applyToSchemas(operation, "AnyOf")
}
}
if schema.OneOf != nil {
for _, s := range *(schema.OneOf) {
s.applyToSchemas(operation, "OneOf")
}
}
if schema.Not != nil {
schema.Not.applyToSchemas(operation, "Not")
}
if schema.Definitions != nil {
for _, pair := range *(schema.Definitions) {
s := pair.Value
s.applyToSchemas(operation, "Definitions")
}
}
operation(schema, context)
}
// CopyProperties copies all non-nil properties from the source Schema to the target Schema.
func (schema *Schema) CopyProperties(source *Schema) {
if source.Schema != nil {
schema.Schema = source.Schema
}
if source.ID != nil {
schema.ID = source.ID
}
if source.MultipleOf != nil {
schema.MultipleOf = source.MultipleOf
}
if source.Maximum != nil {
schema.Maximum = source.Maximum
}
if source.ExclusiveMaximum != nil {
schema.ExclusiveMaximum = source.ExclusiveMaximum
}
if source.Minimum != nil {
schema.Minimum = source.Minimum
}
if source.ExclusiveMinimum != nil {
schema.ExclusiveMinimum = source.ExclusiveMinimum
}
if source.MaxLength != nil {
schema.MaxLength = source.MaxLength
}
if source.MinLength != nil {
schema.MinLength = source.MinLength
}
if source.Pattern != nil {
schema.Pattern = source.Pattern
}
if source.AdditionalItems != nil {
schema.AdditionalItems = source.AdditionalItems
}
if source.Items != nil {
schema.Items = source.Items
}
if source.MaxItems != nil {
schema.MaxItems = source.MaxItems
}
if source.MinItems != nil {
schema.MinItems = source.MinItems
}
if source.UniqueItems != nil {
schema.UniqueItems = source.UniqueItems
}
if source.MaxProperties != nil {
schema.MaxProperties = source.MaxProperties
}
if source.MinProperties != nil {
schema.MinProperties = source.MinProperties
}
if source.Required != nil {
schema.Required = source.Required
}
if source.AdditionalProperties != nil {
schema.AdditionalProperties = source.AdditionalProperties
}
if source.Properties != nil {
schema.Properties = source.Properties
}
if source.PatternProperties != nil {
schema.PatternProperties = source.PatternProperties
}
if source.Dependencies != nil {
schema.Dependencies = source.Dependencies
}
if source.Enumeration != nil {
schema.Enumeration = source.Enumeration
}
if source.Type != nil {
schema.Type = source.Type
}
if source.AllOf != nil {
schema.AllOf = source.AllOf
}
if source.AnyOf != nil {
schema.AnyOf = source.AnyOf
}
if source.OneOf != nil {
schema.OneOf = source.OneOf
}
if source.Not != nil {
schema.Not = source.Not
}
if source.Definitions != nil {
schema.Definitions = source.Definitions
}
if source.Title != nil {
schema.Title = source.Title
}
if source.Description != nil {
schema.Description = source.Description
}
if source.Default != nil {
schema.Default = source.Default
}
if source.Format != nil {
schema.Format = source.Format
}
if source.Ref != nil {
schema.Ref = source.Ref
}
}
// TypeIs returns true if the Type of a Schema includes the specified type
func (schema *Schema) TypeIs(typeName string) bool {
if schema.Type != nil {
// the schema Type is either a string or an array of strings
if schema.Type.String != nil {
return (*(schema.Type.String) == typeName)
} else if schema.Type.StringArray != nil {
for _, n := range *(schema.Type.StringArray) {
if n == typeName {
return true
}
}
}
}
return false
}
// ResolveRefs resolves "$ref" elements in a Schema and its children.
// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf,
// the reference is kept and we expect downstream tools to separately model these
// referenced schemas.
func (schema *Schema) ResolveRefs() {
rootSchema := schema
count := 1
for count > 0 {
count = 0
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.Ref != nil {
resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref))
if err != nil {
log.Printf("%+v", err)
} else if resolvedRef.TypeIs("object") {
// don't substitute for objects, we'll model the referenced schema with a class
} else if context == "OneOf" {
// don't substitute for references inside oneOf declarations
} else if resolvedRef.OneOf != nil {
// don't substitute for references that contain oneOf declarations
} else if resolvedRef.AdditionalProperties != nil {
// don't substitute for references that look like objects
} else {
schema.Ref = nil
schema.CopyProperties(resolvedRef)
count++
}
}
}, "")
}
}
// resolveJSONPointer resolves JSON pointers.
// The current implementation is very crude and specific to OpenAPI 2.0 schemas.
// It returns an error for any pointer that it is unable to resolve.
func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) {
parts := strings.Split(ref, "#")
if len(parts) == 2 {
documentName := parts[0] + "#"
if documentName == "#" && schema.ID != nil {
documentName = *(schema.ID)
}
path := parts[1]
document := schemas[documentName]
pathParts := strings.Split(path, "/")
// we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases
if len(pathParts) == 1 {
return document, nil
} else if len(pathParts) == 3 {
switch pathParts[1] {
case "definitions":
dictionary := document.Definitions
for _, pair := range *dictionary {
if pair.Name == pathParts[2] {
result = pair.Value
}
}
case "properties":
dictionary := document.Properties
for _, pair := range *dictionary {
if pair.Name == pathParts[2] {
result = pair.Value
}
}
default:
break
}
}
}
if result == nil {
return nil, fmt.Errorf("unresolved pointer: %+v", ref)
}
return result, nil
}
// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema.
func (schema *Schema) ResolveAllOfs() {
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.AllOf != nil {
for _, allOf := range *(schema.AllOf) {
schema.CopyProperties(allOf)
}
schema.AllOf = nil
}
}, "resolveAllOfs")
}
// ResolveAnyOfs replaces all "anyOf" elements with "oneOf".
func (schema *Schema) ResolveAnyOfs() {
schema.applyToSchemas(
func(schema *Schema, context string) {
if schema.AnyOf != nil {
schema.OneOf = schema.AnyOf
schema.AnyOf = nil
}
}, "resolveAnyOfs")
}
// return a pointer to a copy of a passed-in string
func stringptr(input string) (output *string) {
return &input
}
// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition
func (schema *Schema) CopyOfficialSchemaProperty(name string) {
*schema.Properties = append(*schema.Properties,
NewNamedSchema(name,
&Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)}))
}
// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition
func (schema *Schema) CopyOfficialSchemaProperties(names []string) {
for _, name := range names {
schema.CopyOfficialSchemaProperty(name)
}
}
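
A short sketch of how these transformations would typically be chained on a loaded schema. It assumes the schema file declares a top-level "id" so that internal "$ref"s can be found in the package's global schema registry; the file name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/jsonschema"
)

func main() {
	// "api.json" is a placeholder; the schema should declare a top-level "id"
	// so that its internal "$ref"s can be resolved.
	schema, err := jsonschema.NewSchemaFromFile("api.json")
	if err != nil {
		log.Fatal(err)
	}

	// Inline non-object "$ref"s, merge "allOf" members into their parents,
	// and rewrite "anyOf" as "oneOf".
	schema.ResolveRefs()
	schema.ResolveAllOfs()
	schema.ResolveAnyOfs()

	fmt.Println(schema.String())
}
```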

View File

@ -0,0 +1,442 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate go run generate-base.go
package jsonschema
import (
"fmt"
"io/ioutil"
"strconv"
"gopkg.in/yaml.v3"
)
// This is a global map of all known Schemas.
// It is initialized when the first Schema is created and inserted.
var schemas map[string]*Schema
// NewBaseSchema builds a schema object from an embedded json representation.
func NewBaseSchema() (schema *Schema, err error) {
b, err := baseSchemaBytes()
if err != nil {
return nil, err
}
var node yaml.Node
err = yaml.Unmarshal(b, &node)
if err != nil {
return nil, err
}
return NewSchemaFromObject(&node), nil
}
// NewSchemaFromFile reads a schema from a file.
// Currently this assumes that schemas are stored in the source distribution of this project.
func NewSchemaFromFile(filename string) (schema *Schema, err error) {
file, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var node yaml.Node
err = yaml.Unmarshal(file, &node)
if err != nil {
return nil, err
}
return NewSchemaFromObject(&node), nil
}
// NewSchemaFromObject constructs a schema from a parsed JSON object.
// Due to the complexity of the schema representation, this is a
// custom reader and not the standard Go JSON reader (encoding/json).
func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
switch jsonData.Kind {
case yaml.DocumentNode:
return NewSchemaFromObject(jsonData.Content[0])
case yaml.MappingNode:
schema := &Schema{}
for i := 0; i < len(jsonData.Content); i += 2 {
k := jsonData.Content[i].Value
v := jsonData.Content[i+1]
switch k {
case "$schema":
schema.Schema = schema.stringValue(v)
case "id":
schema.ID = schema.stringValue(v)
case "multipleOf":
schema.MultipleOf = schema.numberValue(v)
case "maximum":
schema.Maximum = schema.numberValue(v)
case "exclusiveMaximum":
schema.ExclusiveMaximum = schema.boolValue(v)
case "minimum":
schema.Minimum = schema.numberValue(v)
case "exclusiveMinimum":
schema.ExclusiveMinimum = schema.boolValue(v)
case "maxLength":
schema.MaxLength = schema.intValue(v)
case "minLength":
schema.MinLength = schema.intValue(v)
case "pattern":
schema.Pattern = schema.stringValue(v)
case "additionalItems":
schema.AdditionalItems = schema.schemaOrBooleanValue(v)
case "items":
schema.Items = schema.schemaOrSchemaArrayValue(v)
case "maxItems":
schema.MaxItems = schema.intValue(v)
case "minItems":
schema.MinItems = schema.intValue(v)
case "uniqueItems":
schema.UniqueItems = schema.boolValue(v)
case "maxProperties":
schema.MaxProperties = schema.intValue(v)
case "minProperties":
schema.MinProperties = schema.intValue(v)
case "required":
schema.Required = schema.arrayOfStringsValue(v)
case "additionalProperties":
schema.AdditionalProperties = schema.schemaOrBooleanValue(v)
case "properties":
schema.Properties = schema.mapOfSchemasValue(v)
case "patternProperties":
schema.PatternProperties = schema.mapOfSchemasValue(v)
case "dependencies":
schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v)
case "enum":
schema.Enumeration = schema.arrayOfEnumValuesValue(v)
case "type":
schema.Type = schema.stringOrStringArrayValue(v)
case "allOf":
schema.AllOf = schema.arrayOfSchemasValue(v)
case "anyOf":
schema.AnyOf = schema.arrayOfSchemasValue(v)
case "oneOf":
schema.OneOf = schema.arrayOfSchemasValue(v)
case "not":
schema.Not = NewSchemaFromObject(v)
case "definitions":
schema.Definitions = schema.mapOfSchemasValue(v)
case "title":
schema.Title = schema.stringValue(v)
case "description":
schema.Description = schema.stringValue(v)
case "default":
schema.Default = v
case "format":
schema.Format = schema.stringValue(v)
case "$ref":
schema.Ref = schema.stringValue(v)
default:
fmt.Printf("UNSUPPORTED (%s)\n", k)
}
}
// insert schema in global map
if schema.ID != nil {
if schemas == nil {
schemas = make(map[string]*Schema, 0)
}
schemas[*(schema.ID)] = schema
}
return schema
default:
fmt.Printf("schemaValue: unexpected node %+v\n", jsonData)
return nil
}
return nil
}
//
// BUILDERS
// The following methods build elements of Schemas from yaml.Node values.
// Each returns nil if it is unable to build the desired element.
//
// Gets the string value of a yaml.Node if possible.
func (schema *Schema) stringValue(v *yaml.Node) *string {
switch v.Kind {
case yaml.ScalarNode:
return &v.Value
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
return nil
}
// Gets the numeric value of a yaml.Node if possible.
func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber {
number := &SchemaNumber{}
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!float":
v2, _ := strconv.ParseFloat(v.Value, 64)
number.Float = &v2
return number
case "!!int":
v2, _ := strconv.ParseInt(v.Value, 10, 64)
number.Integer = &v2
return number
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("stringValue: unexpected node %+v\n", v)
}
return nil
}
// Gets the integer value of a yaml.Node if possible.
func (schema *Schema) intValue(v *yaml.Node) *int64 {
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!float":
v2, _ := strconv.ParseFloat(v.Value, 64)
v3 := int64(v2)
return &v3
case "!!int":
v2, _ := strconv.ParseInt(v.Value, 10, 64)
return &v2
default:
fmt.Printf("intValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("intValue: unexpected node %+v\n", v)
}
return nil
}
// Gets the bool value of a yaml.Node if possible.
func (schema *Schema) boolValue(v *yaml.Node) *bool {
switch v.Kind {
case yaml.ScalarNode:
switch v.Tag {
case "!!bool":
v2, _ := strconv.ParseBool(v.Value)
return &v2
default:
fmt.Printf("boolValue: unexpected node %+v\n", v)
}
default:
fmt.Printf("boolValue: unexpected node %+v\n", v)
}
return nil
}
// Gets a map of Schemas from a yaml.Node if possible.
func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema {
switch v.Kind {
case yaml.MappingNode:
m := make([]*NamedSchema, 0)
for i := 0; i < len(v.Content); i += 2 {
k2 := v.Content[i].Value
v2 := v.Content[i+1]
pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)}
m = append(m, pair)
}
return &m
default:
fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v)
}
return nil
}
// Gets an array of Schemas from a yaml.Node if possible.
func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema {
switch v.Kind {
case yaml.SequenceNode:
m := make([]*Schema, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.MappingNode:
s := NewSchemaFromObject(v2)
m = append(m, s)
default:
fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2)
}
}
return &m
case yaml.MappingNode:
m := make([]*Schema, 0)
s := NewSchemaFromObject(v)
m = append(m, s)
return &m
default:
fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v)
}
return nil
}
// Gets a Schema or an array of Schemas from a yaml.Node if possible.
func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray {
switch v.Kind {
case yaml.SequenceNode:
m := make([]*Schema, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.MappingNode:
s := NewSchemaFromObject(v2)
m = append(m, s)
default:
fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2)
}
}
return &SchemaOrSchemaArray{SchemaArray: &m}
case yaml.MappingNode:
s := NewSchemaFromObject(v)
return &SchemaOrSchemaArray{Schema: s}
default:
fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v)
}
return nil
}
// Gets an array of strings from a yaml.Node if possible.
func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string {
switch v.Kind {
case yaml.ScalarNode:
a := []string{v.Value}
return &a
case yaml.SequenceNode:
a := make([]string, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
a = append(a, v2.Value)
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
}
}
return &a
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
}
return nil
}
// Gets a string or an array of strings from a yaml.Node if possible.
func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray {
switch v.Kind {
case yaml.ScalarNode:
s := &StringOrStringArray{}
s.String = &v.Value
return s
case yaml.SequenceNode:
a := make([]string, 0)
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
a = append(a, v2.Value)
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
}
}
s := &StringOrStringArray{}
s.StringArray = &a
return s
default:
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
}
return nil
}
// Gets an array of enum values from a yaml.Node if possible.
func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue {
a := make([]SchemaEnumValue, 0)
switch v.Kind {
case yaml.SequenceNode:
for _, v2 := range v.Content {
switch v2.Kind {
case yaml.ScalarNode:
switch v2.Tag {
case "!!str":
a = append(a, SchemaEnumValue{String: &v2.Value})
case "!!bool":
v3, _ := strconv.ParseBool(v2.Value)
a = append(a, SchemaEnumValue{Bool: &v3})
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag)
}
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2)
}
}
default:
fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v)
}
return &a
}
// Gets a map of schemas or string arrays from a yaml.Node if possible.
func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray {
m := make([]*NamedSchemaOrStringArray, 0)
switch v.Kind {
case yaml.MappingNode:
for i := 0; i < len(v.Content); i += 2 {
k2 := v.Content[i].Value
v2 := v.Content[i+1]
switch v2.Kind {
case yaml.SequenceNode:
a := make([]string, 0)
for _, v3 := range v2.Content {
switch v3.Kind {
case yaml.ScalarNode:
a = append(a, v3.Value)
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3)
}
}
s := &SchemaOrStringArray{}
s.StringArray = &a
pair := &NamedSchemaOrStringArray{Name: k2, Value: s}
m = append(m, pair)
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2)
}
}
default:
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v)
}
return &m
}
// Gets a schema or a boolean value from a yaml.Node if possible.
func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean {
schemaOrBoolean := &SchemaOrBoolean{}
switch v.Kind {
case yaml.ScalarNode:
v2, _ := strconv.ParseBool(v.Value)
schemaOrBoolean.Boolean = &v2
case yaml.MappingNode:
schemaOrBoolean.Schema = NewSchemaFromObject(v)
default:
fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v)
}
return schemaOrBoolean
}

View File

@ -0,0 +1,150 @@
{
"id": "http://json-schema.org/draft-04/schema#",
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Core schema meta-schema",
"definitions": {
"schemaArray": {
"type": "array",
"minItems": 1,
"items": { "$ref": "#" }
},
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
},
"simpleTypes": {
"enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
},
"stringArray": {
"type": "array",
"items": { "type": "string" },
"minItems": 1,
"uniqueItems": true
}
},
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uri"
},
"$schema": {
"type": "string",
"format": "uri"
},
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"default": {},
"multipleOf": {
"type": "number",
"minimum": 0,
"exclusiveMinimum": true
},
"maximum": {
"type": "number"
},
"exclusiveMaximum": {
"type": "boolean",
"default": false
},
"minimum": {
"type": "number"
},
"exclusiveMinimum": {
"type": "boolean",
"default": false
},
"maxLength": { "$ref": "#/definitions/positiveInteger" },
"minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
"pattern": {
"type": "string",
"format": "regex"
},
"additionalItems": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"items": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/schemaArray" }
],
"default": {}
},
"maxItems": { "$ref": "#/definitions/positiveInteger" },
"minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
"uniqueItems": {
"type": "boolean",
"default": false
},
"maxProperties": { "$ref": "#/definitions/positiveInteger" },
"minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
"required": { "$ref": "#/definitions/stringArray" },
"additionalProperties": {
"anyOf": [
{ "type": "boolean" },
{ "$ref": "#" }
],
"default": {}
},
"definitions": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"properties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"patternProperties": {
"type": "object",
"additionalProperties": { "$ref": "#" },
"default": {}
},
"dependencies": {
"type": "object",
"additionalProperties": {
"anyOf": [
{ "$ref": "#" },
{ "$ref": "#/definitions/stringArray" }
]
}
},
"enum": {
"type": "array",
"minItems": 1,
"uniqueItems": true
},
"type": {
"anyOf": [
{ "$ref": "#/definitions/simpleTypes" },
{
"type": "array",
"items": { "$ref": "#/definitions/simpleTypes" },
"minItems": 1,
"uniqueItems": true
}
]
},
"allOf": { "$ref": "#/definitions/schemaArray" },
"anyOf": { "$ref": "#/definitions/schemaArray" },
"oneOf": { "$ref": "#/definitions/schemaArray" },
"not": { "$ref": "#" }
},
"dependencies": {
"exclusiveMaximum": [ "maximum" ],
"exclusiveMinimum": [ "minimum" ]
},
"default": {}
}

View File

@ -0,0 +1,369 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonschema
import (
"fmt"
"gopkg.in/yaml.v3"
)
const indentation = " "
func renderMappingNode(node *yaml.Node, indent string) (result string) {
result = "{\n"
innerIndent := indent + indentation
for i := 0; i < len(node.Content); i += 2 {
// first print the key
key := node.Content[i].Value
result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key)
// then the value
value := node.Content[i+1]
switch value.Kind {
case yaml.ScalarNode:
result += "\"" + value.Value + "\""
case yaml.MappingNode:
result += renderMappingNode(value, innerIndent)
case yaml.SequenceNode:
result += renderSequenceNode(value, innerIndent)
default:
result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value)
}
if i < len(node.Content)-2 {
result += ","
}
result += "\n"
}
result += indent + "}"
return result
}
func renderSequenceNode(node *yaml.Node, indent string) (result string) {
result = "[\n"
innerIndent := indent + indentation
for i := 0; i < len(node.Content); i++ {
item := node.Content[i]
switch item.Kind {
case yaml.ScalarNode:
result += innerIndent + "\"" + item.Value + "\""
case yaml.MappingNode:
result += innerIndent + renderMappingNode(item, innerIndent) + ""
default:
result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item)
}
if i < len(node.Content)-1 {
result += ","
}
result += "\n"
}
result += indent + "]"
return result
}
func renderStringArray(array []string, indent string) (result string) {
result = "[\n"
innerIndent := indent + indentation
for i, item := range array {
result += innerIndent + "\"" + item + "\""
if i < len(array)-1 {
result += ","
}
result += "\n"
}
result += indent + "]"
return result
}
// Render renders a yaml.Node as JSON
func Render(node *yaml.Node) string {
if node.Kind == yaml.DocumentNode {
if len(node.Content) == 1 {
return Render(node.Content[0])
}
} else if node.Kind == yaml.MappingNode {
return renderMappingNode(node, "") + "\n"
} else if node.Kind == yaml.SequenceNode {
return renderSequenceNode(node, "") + "\n"
}
return ""
}
func (object *SchemaNumber) nodeValue() *yaml.Node {
if object.Integer != nil {
return nodeForInt64(*object.Integer)
} else if object.Float != nil {
return nodeForFloat64(*object.Float)
} else {
return nil
}
}
func (object *SchemaOrBoolean) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.Boolean != nil {
return nodeForBoolean(*object.Boolean)
} else {
return nil
}
}
func nodeForStringArray(array []string) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range array {
content = append(content, nodeForString(item))
}
return nodeForSequence(content)
}
func nodeForSchemaArray(array []*Schema) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range array {
content = append(content, item.nodeValue())
}
return nodeForSequence(content)
}
func (object *StringOrStringArray) nodeValue() *yaml.Node {
if object.String != nil {
return nodeForString(*object.String)
} else if object.StringArray != nil {
return nodeForStringArray(*(object.StringArray))
} else {
return nil
}
}
func (object *SchemaOrStringArray) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.StringArray != nil {
return nodeForStringArray(*(object.StringArray))
} else {
return nil
}
}
func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node {
if object.Schema != nil {
return object.Schema.nodeValue()
} else if object.SchemaArray != nil {
return nodeForSchemaArray(*(object.SchemaArray))
} else {
return nil
}
}
func (object *SchemaEnumValue) nodeValue() *yaml.Node {
if object.String != nil {
return nodeForString(*object.String)
} else if object.Bool != nil {
return nodeForBoolean(*object.Bool)
} else {
return nil
}
}
func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, pair := range *(array) {
content = appendPair(content, pair.Name, pair.Value.nodeValue())
}
return nodeForMapping(content)
}
func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, pair := range *(array) {
content = appendPair(content, pair.Name, pair.Value.nodeValue())
}
return nodeForMapping(content)
}
func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node {
content := make([]*yaml.Node, 0)
for _, item := range *array {
content = append(content, item.nodeValue())
}
return nodeForSequence(content)
}
func nodeForMapping(content []*yaml.Node) *yaml.Node {
return &yaml.Node{
Kind: yaml.MappingNode,
Content: content,
}
}
func nodeForSequence(content []*yaml.Node) *yaml.Node {
return &yaml.Node{
Kind: yaml.SequenceNode,
Content: content,
}
}
func nodeForString(value string) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!str",
Value: value,
}
}
func nodeForBoolean(value bool) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!bool",
Value: fmt.Sprintf("%t", value),
}
}
func nodeForInt64(value int64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!int",
Value: fmt.Sprintf("%d", value),
}
}
func nodeForFloat64(value float64) *yaml.Node {
return &yaml.Node{
Kind: yaml.ScalarNode,
Tag: "!!float",
Value: fmt.Sprintf("%f", value),
}
}
func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node {
nodes = append(nodes, nodeForString(name))
nodes = append(nodes, value)
return nodes
}
func (schema *Schema) nodeValue() *yaml.Node {
n := &yaml.Node{Kind: yaml.MappingNode}
content := make([]*yaml.Node, 0)
if schema.Title != nil {
content = appendPair(content, "title", nodeForString(*schema.Title))
}
if schema.ID != nil {
content = appendPair(content, "id", nodeForString(*schema.ID))
}
if schema.Schema != nil {
content = appendPair(content, "$schema", nodeForString(*schema.Schema))
}
if schema.Type != nil {
content = appendPair(content, "type", schema.Type.nodeValue())
}
if schema.Items != nil {
content = appendPair(content, "items", schema.Items.nodeValue())
}
if schema.Description != nil {
content = appendPair(content, "description", nodeForString(*schema.Description))
}
if schema.Required != nil {
content = appendPair(content, "required", nodeForStringArray(*schema.Required))
}
if schema.AdditionalProperties != nil {
content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue())
}
if schema.PatternProperties != nil {
content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties))
}
if schema.Properties != nil {
content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties))
}
if schema.Dependencies != nil {
content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies))
}
if schema.Ref != nil {
content = appendPair(content, "$ref", nodeForString(*schema.Ref))
}
if schema.MultipleOf != nil {
content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue())
}
if schema.Maximum != nil {
content = appendPair(content, "maximum", schema.Maximum.nodeValue())
}
if schema.ExclusiveMaximum != nil {
content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum))
}
if schema.Minimum != nil {
content = appendPair(content, "minimum", schema.Minimum.nodeValue())
}
if schema.ExclusiveMinimum != nil {
content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum))
}
if schema.MaxLength != nil {
content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength))
}
if schema.MinLength != nil {
content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength))
}
if schema.Pattern != nil {
content = appendPair(content, "pattern", nodeForString(*schema.Pattern))
}
if schema.AdditionalItems != nil {
content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue())
}
if schema.MaxItems != nil {
content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems))
}
if schema.MinItems != nil {
content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems))
}
if schema.UniqueItems != nil {
content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems))
}
if schema.MaxProperties != nil {
content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties))
}
if schema.MinProperties != nil {
content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties))
}
if schema.Enumeration != nil {
content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration))
}
if schema.AllOf != nil {
content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf))
}
if schema.AnyOf != nil {
content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf))
}
if schema.OneOf != nil {
content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf))
}
if schema.Not != nil {
content = appendPair(content, "not", schema.Not.nodeValue())
}
if schema.Definitions != nil {
content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions))
}
if schema.Default != nil {
// m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default})
}
if schema.Format != nil {
content = appendPair(content, "format", nodeForString(*schema.Format))
}
n.Content = content
return n
}
// JSONString returns a json representation of a schema.
func (schema *Schema) JSONString() string {
node := schema.nodeValue()
return Render(node)
}
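
Below is a minimal, illustrative sketch (not part of the vendored file) that builds a small yaml.Node mapping by hand and prints it with the Render function defined above; the import path is assumed to match the package's upstream location in gnostic.

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/jsonschema" // assumed upstream path of the vendored package
	"gopkg.in/yaml.v3"
)

func main() {
	// Equivalent of the YAML mapping {title: example, minimum: 1}.
	node := &yaml.Node{
		Kind: yaml.MappingNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Tag: "!!str", Value: "title"},
			{Kind: yaml.ScalarNode, Tag: "!!str", Value: "example"},
			{Kind: yaml.ScalarNode, Tag: "!!int", Value: "minimum"},
			{Kind: yaml.ScalarNode, Tag: "!!int", Value: "1"},
		},
	}
	fmt.Print(jsonschema.Render(node))
	// Output (note that this renderer quotes every scalar, including numbers):
	// {
	//   "title": "example",
	//   "minimum": "1"
	// }
}
```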

View File

@ -5,6 +5,7 @@ go_library(
srcs = [
"OpenAPIv2.go",
"OpenAPIv2.pb.go",
"document.go",
],
importmap = "k8s.io/kops/vendor/github.com/googleapis/gnostic/openapiv2",
importpath = "github.com/googleapis/gnostic/openapiv2",
@ -13,6 +14,8 @@ go_library(
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes/any:go_default_library",
"//vendor/github.com/googleapis/gnostic/compiler:go_default_library",
"//vendor/gopkg.in/yaml.v2:go_default_library",
"//vendor/google.golang.org/protobuf/reflect/protoreflect:go_default_library",
"//vendor/google.golang.org/protobuf/runtime/protoimpl:go_default_library",
"//vendor/gopkg.in/yaml.v3:go_default_library",
],
)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2020 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -41,6 +41,9 @@ option java_package = "org.openapi_v2";
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
option objc_class_prefix = "OAS";
// The Go package name.
option go_package = "openapiv2;openapi_v2";
message AdditionalPropertiesItem {
oneof oneof {
Schema schema = 1;
@ -553,7 +556,7 @@ message Response {
repeated NamedAny vendor_extension = 5;
}
// One or more JSON representations for parameters
// One or more JSON representations for responses
message ResponseDefinitions {
repeated NamedResponse additional_properties = 1;
}

View File

@ -1,16 +1,14 @@
# OpenAPI v2 Protocol Buffer Models
This directory contains a Protocol Buffer-language model
and related code for supporting OpenAPI v2.
This directory contains a Protocol Buffer-language model and related code for
supporting OpenAPI v2.
Gnostic applications and plugins can use OpenAPIv2.proto
to generate Protocol Buffer support code for their preferred languages.
Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol
Buffer support code for their preferred languages.
OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI
descriptions into the Protocol Buffer-based datastructures
generated from OpenAPIv2.proto.
OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
the Protocol Buffer-based datastructures generated from OpenAPIv2.proto.
OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic
compiler generator, and OpenAPIv2.pb.go is generated by
protoc, the Protocol Buffer compiler, and protoc-gen-go, the
Protocol Buffer Go code generation plugin.
OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler
generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer
compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.

View File

@ -0,0 +1,41 @@
// Copyright 2020 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openapi_v2
import (
"github.com/googleapis/gnostic/compiler"
"gopkg.in/yaml.v3"
)
// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
func ParseDocument(b []byte) (*Document, error) {
info, err := compiler.ReadInfoFromBytes("", b)
if err != nil {
return nil, err
}
root := info.Content[0]
return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
}
// YAMLValue produces a serialized YAML representation of the document.
func (d *Document) YAMLValue(comment string) ([]byte, error) {
rawInfo := d.ToRawInfo()
rawInfo = &yaml.Node{
Kind: yaml.DocumentNode,
Content: []*yaml.Node{rawInfo},
HeadComment: comment,
}
return yaml.Marshal(rawInfo)
}
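
As a rough usage sketch (not part of the vendored file), the new helpers can be combined to round-trip a description: read the raw bytes, parse them with ParseDocument, and serialize the result with YAMLValue. The file name and head comment below are illustrative.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	openapi_v2 "github.com/googleapis/gnostic/openapiv2"
)

func main() {
	data, err := ioutil.ReadFile("swagger.yaml") // illustrative input file
	if err != nil {
		log.Fatal(err)
	}
	doc, err := openapi_v2.ParseDocument(data)
	if err != nil {
		log.Fatal(err)
	}
	out, err := doc.YAMLValue("# generated from swagger.yaml")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}
```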

View File

@ -203,7 +203,7 @@
"additionalProperties": {
"$ref": "#/definitions/response"
},
"description": "One or more JSON representations for parameters"
"description": "One or more JSON representations for responses"
},
"externalDocs": {
"type": "object",
@ -1607,4 +1607,4 @@
}
}
}
}
}

View File

@ -1,44 +1,54 @@
# Mergo
[![GoDoc][3]][4]
[![GitHub release][5]][6]
[![GoCard][7]][8]
[![Build Status][1]][2]
[![Coverage Status][9]][10]
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]
[![GoCenter Kudos][15]][16]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://img.shields.io/github/release/imdario/mergo.svg
[6]: https://github.com/imdario/mergo/releases
[7]: https://goreportcard.com/badge/imdario/mergo
[8]: https://goreportcard.com/report/github.com/imdario/mergo
[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[10]: https://coveralls.io/github/imdario/mergo?branch=master
[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo
[16]: https://search.gocenter.io/github.com/imdario/mergo
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
## Status
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
[![GoDoc][3]][4]
[![GoCard][5]][6]
[![Build Status][1]][2]
[![Coverage Status][7]][8]
[![Sourcegraph][9]][10]
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
[4]: https://godoc.org/github.com/imdario/mergo
[5]: https://goreportcard.com/badge/imdario/mergo
[6]: https://goreportcard.com/report/github.com/imdario/mergo
[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
[8]: https://coveralls.io/github/imdario/mergo?branch=master
[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
### Latest release
[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).
### Important note
Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
### Donations
If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
@ -87,8 +97,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
- [janoszen/containerssh](https://github.com/janoszen/containerssh)
## Installation
## Install
go get github.com/imdario/mergo
@ -99,7 +110,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
## Usage
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will recursively merge any exported one. It won't merge empty struct values as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
```go
if err := mergo.Merge(&dst, src); err != nil {
@ -125,9 +136,7 @@ if err := mergo.Map(&dst, srcMap); err != nil {
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
### Nice example
Here is a nice example:
```go
package main
@ -175,10 +184,10 @@ import (
"time"
)
type timeTransfomer struct {
type timeTransformer struct {
}
func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
if typ == reflect.TypeOf(time.Time{}) {
return func(dst, src reflect.Value) error {
if dst.CanSet() {
@ -202,7 +211,7 @@ type Snapshot struct {
func main() {
src := Snapshot{time.Now()}
dest := Snapshot{}
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
fmt.Println(dest)
// Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }

View File

@ -4,41 +4,140 @@
// license that can be found in the LICENSE file.
/*
Package mergo merges same-type structs and maps by setting default values in zero-value fields.
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
Status
It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
Important note
Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
Install
Do your usual installation procedure:
go get github.com/imdario/mergo
// use in your .go code
import (
"github.com/imdario/mergo"
)
Usage
From my own work-in-progress project:
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will recursively merge any exported one. It won't merge empty struct values as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
type networkConfig struct {
Protocol string
Address string
ServerType string `json: "server_type"`
Port uint16
if err := mergo.Merge(&dst, src); err != nil {
// ...
}
type FssnConfig struct {
Network networkConfig
Also, you can merge overwriting values using the transformer WithOverride.
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
// ...
}
var fssnDefault = FssnConfig {
networkConfig {
"tcp",
"127.0.0.1",
"http",
31560,
},
Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
if err := mergo.Map(&dst, srcMap); err != nil {
// ...
}
// Inside a function [...]
Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
if err := mergo.Merge(&config, fssnDefault); err != nil {
log.Fatal(err)
Here is a nice example:
package main
import (
"fmt"
"github.com/imdario/mergo"
)
type Foo struct {
A string
B int64
}
// More code [...]
func main() {
src := Foo{
A: "one",
B: 2,
}
dest := Foo{
A: "two",
}
mergo.Merge(&dest, src)
fmt.Println(dest)
// Will print
// {two 2}
}
Transformers
Transformers allow merging specific types differently from the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have a zero value, but IsZero can return true because it has fields with zero values. How can we merge a non-zero time.Time?
package main
import (
"fmt"
"github.com/imdario/mergo"
"reflect"
"time"
)
type timeTransformer struct {
}
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
if typ == reflect.TypeOf(time.Time{}) {
return func(dst, src reflect.Value) error {
if dst.CanSet() {
isZero := dst.MethodByName("IsZero")
result := isZero.Call([]reflect.Value{})
if result[0].Bool() {
dst.Set(src)
}
}
return nil
}
}
return nil
}
type Snapshot struct {
Time time.Time
// ...
}
func main() {
src := Snapshot{time.Now()}
dest := Snapshot{}
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
fmt.Println(dest)
// Will print
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
About
Written by Dario Castañé: https://da.rio.hn
License
BSD 3-Clause license, as Go language.
*/
package mergo

5
vendor/github.com/imdario/mergo/go.mod generated vendored Normal file
View File

@ -0,0 +1,5 @@
module github.com/imdario/mergo
go 1.13
require gopkg.in/yaml.v2 v2.3.0

4
vendor/github.com/imdario/mergo/go.sum generated vendored Normal file
View File

@ -0,0 +1,4 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -99,11 +99,11 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
continue
}
if srcKind == dstKind {
if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
} else if srcKind == reflect.Map {
@ -141,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
}
func _map(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerAgument
}
var (
vDst, vSrc reflect.Value
err error
@ -157,8 +160,7 @@ func _map(dst, src interface{}, opts ...func(*Config)) error {
// To be friction-less, we redirect equal-type arguments
// to deepMerge. Only because arguments can be anything.
if vSrc.Kind() == vDst.Kind() {
_, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
return err
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}
switch vSrc.Kind() {
case reflect.Struct:

View File

@ -11,26 +11,26 @@ package mergo
import (
"fmt"
"reflect"
"unsafe"
)
func hasExportedField(dst reflect.Value) (exported bool) {
func hasMergeableFields(dst reflect.Value) (exported bool) {
for i, n := 0, dst.NumField(); i < n; i++ {
field := dst.Type().Field(i)
if isExportedComponent(&field) {
return true
if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
exported = exported || hasMergeableFields(dst.Field(i))
} else if isExportedComponent(&field) {
exported = exported || len(field.PkgPath) == 0
}
}
return
}
func isExportedComponent(field *reflect.StructField) bool {
name := field.Name
pkgPath := field.PkgPath
if len(pkgPath) > 0 {
return false
}
c := name[0]
c := field.Name[0]
if 'a' <= c && c <= 'z' || c == '_' {
return false
}
@ -44,6 +44,8 @@ type Config struct {
Transformers Transformers
overwriteWithEmptyValue bool
overwriteSliceWithEmptyValue bool
sliceDeepCopy bool
debug bool
}
type Transformers interface {
@ -53,17 +55,16 @@ type Transformers interface {
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) {
dst = dstIn
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
overwrite := config.Overwrite
typeCheck := config.TypeCheck
overwriteWithEmptySrc := config.overwriteWithEmptyValue
overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
sliceDeepCopy := config.sliceDeepCopy
if !src.IsValid() {
return
}
if dst.CanAddr() {
addr := dst.UnsafeAddr()
h := 17 * addr
@ -71,7 +72,7 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
typ := dst.Type()
for p := seen; p != nil; p = p.next {
if p.ptr == addr && p.typ == typ {
return dst, nil
return nil
}
}
// Remember, remember...
@ -85,125 +86,153 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
}
}
if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() {
err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind())
return
}
switch dst.Kind() {
case reflect.Struct:
if hasExportedField(dst) {
dstCp := reflect.New(dst.Type()).Elem()
if hasMergeableFields(dst) {
for i, n := 0, dst.NumField(); i < n; i++ {
dstField := dst.Field(i)
structField := dst.Type().Field(i)
// copy un-exported struct fields
if !isExportedComponent(&structField) {
rf := dstCp.Field(i)
rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec
dstRF := dst.Field(i)
if !dst.Field(i).CanAddr() {
continue
}
dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec
rf.Set(dstRF)
continue
}
dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config)
if err != nil {
if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
return
}
dstCp.Field(i).Set(dstField)
}
if dst.CanSet() {
dst.Set(dstCp)
} else {
dst = dstCp
}
return
} else {
if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
dst = src
dst.Set(src)
}
}
case reflect.Map:
if dst.IsNil() && !src.IsNil() {
if dst.CanSet() {
dst.Set(reflect.MakeMap(dst.Type()))
} else {
dst = src
return
}
dst.Set(reflect.MakeMap(dst.Type()))
}
if src.Kind() != reflect.Map {
if overwrite {
dst.Set(src)
}
return
}
for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key)
dstElement := dst.MapIndex(key)
if !srcElement.IsValid() {
continue
}
if dst.MapIndex(key).IsValid() {
k := dstElement.Interface()
dstElement = reflect.ValueOf(k)
}
if isReflectNil(srcElement) {
if overwrite || isReflectNil(dstElement) {
dst.SetMapIndex(key, srcElement)
dstElement := dst.MapIndex(key)
switch srcElement.Kind() {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
if srcElement.IsNil() {
if overwrite {
dst.SetMapIndex(key, srcElement)
}
continue
}
fallthrough
default:
if !srcElement.CanInterface() {
continue
}
switch reflect.TypeOf(srcElement.Interface()).Kind() {
case reflect.Struct:
fallthrough
case reflect.Ptr:
fallthrough
case reflect.Map:
srcMapElm := srcElement
dstMapElm := dstElement
if srcMapElm.CanInterface() {
srcMapElm = reflect.ValueOf(srcMapElm.Interface())
if dstMapElm.IsValid() {
dstMapElm = reflect.ValueOf(dstMapElm.Interface())
}
}
if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
return
}
case reflect.Slice:
srcSlice := reflect.ValueOf(srcElement.Interface())
var dstSlice reflect.Value
if !dstElement.IsValid() || dstElement.IsNil() {
dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
} else {
dstSlice = reflect.ValueOf(dstElement.Interface())
}
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
if typeCheck && srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = srcSlice
} else if config.AppendSlice {
if srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
} else if sliceDeepCopy {
i := 0
for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
srcElement := srcSlice.Index(i)
dstElement := dstSlice.Index(i)
if srcElement.CanInterface() {
srcElement = reflect.ValueOf(srcElement.Interface())
}
if dstElement.CanInterface() {
dstElement = reflect.ValueOf(dstElement.Interface())
}
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
}
}
dst.SetMapIndex(key, dstSlice)
}
continue
}
if !srcElement.CanInterface() {
if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
continue
}
if srcElement.CanInterface() {
srcElement = reflect.ValueOf(srcElement.Interface())
if dstElement.IsValid() {
dstElement = reflect.ValueOf(dstElement.Interface())
if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
dst.SetMapIndex(key, srcElement)
}
dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config)
if err != nil {
return
}
dst.SetMapIndex(key, dstElement)
}
case reflect.Slice:
newSlice := dst
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
if typeCheck && src.Type() != dst.Type() {
return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type())
}
newSlice = src
} else if config.AppendSlice {
if typeCheck && src.Type() != dst.Type() {
err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
return
}
newSlice = reflect.AppendSlice(dst, src)
}
if dst.CanSet() {
dst.Set(newSlice)
} else {
dst = newSlice
}
case reflect.Ptr, reflect.Interface:
if isReflectNil(src) {
if !dst.CanSet() {
break
}
if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) {
if dst.IsNil() || overwrite {
if overwrite || isEmptyValue(dst) {
if dst.CanSet() {
dst.Set(src)
} else {
dst = src
}
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
dst.Set(src)
} else if config.AppendSlice {
if src.Type() != dst.Type() {
return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
}
dst.Set(reflect.AppendSlice(dst, src))
} else if sliceDeepCopy {
for i := 0; i < src.Len() && i < dst.Len(); i++ {
srcElement := src.Index(i)
dstElement := dst.Index(i)
if srcElement.CanInterface() {
srcElement = reflect.ValueOf(srcElement.Interface())
}
if dstElement.CanInterface() {
dstElement = reflect.ValueOf(dstElement.Interface())
}
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return
}
}
}
case reflect.Ptr:
fallthrough
case reflect.Interface:
if isReflectNil(src) {
if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
dst.Set(src)
}
break
}
@ -214,33 +243,35 @@ func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int,
dst.Set(src)
}
} else if src.Kind() == reflect.Ptr {
if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
dst = dst.Addr()
} else if dst.Elem().Type() == src.Type() {
if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
return
}
} else {
return dst, ErrDifferentArgumentsTypes
return ErrDifferentArgumentsTypes
}
break
}
if dst.IsNil() || overwrite {
if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) {
if dst.CanSet() {
dst.Set(src)
} else {
dst = src
}
if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
dst.Set(src)
}
} else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
break
}
if dst.Elem().Kind() == src.Elem().Kind() {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
break
}
default:
overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst))
if overwriteFull {
mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
if mustSet {
if dst.CanSet() {
dst.Set(src)
} else {
@ -281,6 +312,7 @@ func WithOverride(config *Config) {
// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
func WithOverwriteWithEmptyValue(config *Config) {
config.Overwrite = true
config.overwriteWithEmptyValue = true
}
@ -299,7 +331,16 @@ func WithTypeCheck(config *Config) {
config.TypeCheck = true
}
// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
func WithSliceDeepCopy(config *Config) {
config.sliceDeepCopy = true
config.Overwrite = true
}
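
For orientation, here is a small, illustrative sketch (not part of the vendored file) of how these option functions are passed to Merge; the Settings struct and its values are made up for the example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/imdario/mergo"
)

type Settings struct {
	Host  string
	Ports []int
}

func main() {
	dst := Settings{Host: "localhost", Ports: []int{80}}
	src := Settings{Host: "example.com", Ports: []int{443, 8443}}

	// Default behaviour: only zero-value fields in dst are filled from src,
	// so dst keeps "localhost" and [80].
	if err := mergo.Merge(&dst, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst) // {localhost [80]}

	// WithOverride: non-empty values in src replace the values in dst.
	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst) // {example.com [443 8443]}

	// The other options above (WithOverwriteWithEmptyValue, WithTypeCheck,
	// WithSliceDeepCopy) are passed the same way, as extra arguments.
}
```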
func merge(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerAgument
}
var (
vDst, vSrc reflect.Value
err error
@ -314,14 +355,10 @@ func merge(dst, src interface{}, opts ...func(*Config)) error {
if vDst, vSrc, err = resolveValues(dst, src); err != nil {
return err
}
if !vDst.CanSet() {
return fmt.Errorf("cannot set dst, needs reference")
}
if vDst.Type() != vSrc.Type() {
return ErrDifferentArgumentsTypes
}
_, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
return err
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
}
// IsReflectNil is the reflect value provided nil

View File

@ -20,6 +20,7 @@ var (
ErrNotSupported = errors.New("only structs and maps are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
)
// During deepMerge, must keep track of checks that are
@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
}
return
}
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
if dst.CanAddr() {
addr := dst.UnsafeAddr()
h := 17 * addr
seen := visited[h]
typ := dst.Type()
for p := seen; p != nil; p = p.next {
if p.ptr == addr && p.typ == typ {
return nil
}
}
// Remember, remember...
visited[h] = &visit{addr, typ, seen}
}
return // TODO refactor
}

View File

@ -1,11 +0,0 @@
language: go
sudo: false
matrix:
include:
- go: "1.9"
- go: "1.10"
- go: "tip"
script:
- go test -race ./...

View File

@ -6,7 +6,10 @@ go_library(
importmap = "k8s.io/kops/vendor/github.com/kisielk/errcheck",
importpath = "github.com/kisielk/errcheck",
visibility = ["//visibility:private"],
deps = ["//vendor/github.com/kisielk/errcheck/internal/errcheck:go_default_library"],
deps = [
"//vendor/github.com/kisielk/errcheck/errcheck:go_default_library",
"//vendor/golang.org/x/tools/go/packages:go_default_library",
],
)
go_binary(

View File

@ -2,13 +2,13 @@
errcheck is a program for checking for unchecked errors in go programs.
[![Build Status](https://travis-ci.org/kisielk/errcheck.png?branch=master)](https://travis-ci.org/kisielk/errcheck)
![errcheck](https://github.com/kisielk/errcheck/workflows/errcheck/badge.svg)
## Install
go get -u github.com/kisielk/errcheck
errcheck requires Go 1.9 or newer and depends on the package go/packages from the golang.org/x/tools repository.
errcheck requires Go 1.12 or newer, and depends on the package go/packages from the golang.org/x/tools repository.
## Use
@ -55,11 +55,27 @@ An example of an exclude file is:
io/ioutil.ReadFile
io.Copy(*bytes.Buffer)
io.Copy(os.Stdout)
// Sometimes we don't care if an HTTP request fails.
(*net/http.Client).Do
The exclude list is combined with an internal list for functions in the Go standard library that
have an error return type but are documented to never return an error.
By default, the exclude list is combined with an internal list for functions in
the Go standard library that have an error return type but are documented to never
return an error. To disable the built-in exclude list, pass the `-excludeonly` flag.
Run errcheck in `-verbose` mode to see the resulting list of added excludes.
When using vendored dependencies, specify the full import path. For example:
* Your project's import path is `example.com/yourpkg`
* You've vendored `example.net/fmt2` as `vendor/example.net/fmt2`
* You want to exclude `fmt2.Println` from error checking
In this case, add this line to your exclude file:
```
example.com/yourpkg/vendor/example.net/fmt2.Println
```
Empty lines and lines starting with `//` are ignored.
### The deprecated method
@ -96,14 +112,6 @@ specified for it. To disable this, specify a regex that matches nothing:
The `-ignoretests` flag disables checking of `_test.go` files. It takes
no arguments.
## Cgo
Currently errcheck is unable to check packages that import "C" due to limitations in the importer when used with versions earlier than Go 1.11.
However, you can use errcheck on packages that depend on those which use cgo. In order for this to work you need to go install the cgo dependencies before running errcheck on the dependent packages.
See https://github.com/kisielk/errcheck/issues/16 for more details.
## Exit Codes
errcheck returns 1 if any problems were found in the checked files.

View File

@ -5,9 +5,11 @@ go_library(
srcs = [
"embedded_walker.go",
"errcheck.go",
"tags.go",
"tags_compat.go",
],
importmap = "k8s.io/kops/vendor/github.com/kisielk/errcheck/internal/errcheck",
importpath = "github.com/kisielk/errcheck/internal/errcheck",
visibility = ["//vendor/github.com/kisielk/errcheck:__subpackages__"],
importmap = "k8s.io/kops/vendor/github.com/kisielk/errcheck/errcheck",
importpath = "github.com/kisielk/errcheck/errcheck",
visibility = ["//visibility:public"],
deps = ["//vendor/golang.org/x/tools/go/packages:go_default_library"],
)

View File

@ -1,6 +1,4 @@
// Package errcheck is the library used to implement the errcheck command-line tool.
//
// Note: The API of this package has not been finalized and may change at any point.
package errcheck
import (
@ -14,7 +12,6 @@ import (
"regexp"
"sort"
"strings"
"sync"
"golang.org/x/tools/go/packages"
)
@ -23,107 +20,16 @@ var errorType *types.Interface
func init() {
errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
}
var (
// ErrNoGoFiles is returned when CheckPackage is run on a package with no Go source files
ErrNoGoFiles = errors.New("package contains no go source files")
)
// UncheckedError indicates the position of an unchecked error return.
type UncheckedError struct {
Pos token.Position
Line string
FuncName string
}
// UncheckedErrors is returned from the CheckPackage function if the package contains
// any unchecked errors.
// Errors should be appended using the Append method, which is safe to use concurrently.
type UncheckedErrors struct {
mu sync.Mutex
// Errors is a list of all the unchecked errors in the package.
// Printing an error reports its position within the file and the contents of the line.
Errors []UncheckedError
}
func (e *UncheckedErrors) Append(errors ...UncheckedError) {
e.mu.Lock()
defer e.mu.Unlock()
e.Errors = append(e.Errors, errors...)
}
func (e *UncheckedErrors) Error() string {
return fmt.Sprintf("%d unchecked errors", len(e.Errors))
}
// Len is the number of elements in the collection.
func (e *UncheckedErrors) Len() int { return len(e.Errors) }
// Swap swaps the elements with indexes i and j.
func (e *UncheckedErrors) Swap(i, j int) { e.Errors[i], e.Errors[j] = e.Errors[j], e.Errors[i] }
type byName struct{ *UncheckedErrors }
// Less reports whether the element with index i should sort before the element with index j.
func (e byName) Less(i, j int) bool {
ei, ej := e.Errors[i], e.Errors[j]
pi, pj := ei.Pos, ej.Pos
if pi.Filename != pj.Filename {
return pi.Filename < pj.Filename
}
if pi.Line != pj.Line {
return pi.Line < pj.Line
}
if pi.Column != pj.Column {
return pi.Column < pj.Column
}
return ei.Line < ej.Line
}
type Checker struct {
// ignore is a map of package names to regular expressions. Identifiers from a package are
// checked against its regular expressions and if any of the expressions match the call
// is not checked.
Ignore map[string]*regexp.Regexp
// If blank is true then assignments to the blank identifier are also considered to be
// ignored errors.
Blank bool
// If asserts is true then ignored type assertion results are also checked
Asserts bool
// build tags
Tags []string
Verbose bool
// If true, checking of _test.go files is disabled
WithoutTests bool
// If true, checking of files with generated code is disabled
WithoutGeneratedCode bool
exclude map[string]bool
}
func NewChecker() *Checker {
c := Checker{}
c.SetExclude(map[string]bool{})
return &c
}
func (c *Checker) SetExclude(l map[string]bool) {
c.exclude = map[string]bool{}
// Default exclude for stdlib functions
for _, exc := range []string{
// DefaultExcludedSymbols is a list of symbol names that are usually excluded from checks by default.
//
// Note, that they still need to be explicitly copied to Checker.Exclusions.Symbols
DefaultExcludedSymbols = []string{
// bytes
"(*bytes.Buffer).Write",
"(*bytes.Buffer).WriteByte",
@ -157,19 +63,133 @@ func (c *Checker) SetExclude(l map[string]bool) {
// hash
"(hash.Hash).Write",
} {
c.exclude[exc] = true
}
)
for k := range l {
c.exclude[k] = true
}
// UncheckedError indicates the position of an unchecked error return.
type UncheckedError struct {
Pos token.Position
Line string
FuncName string
}
func (c *Checker) logf(msg string, args ...interface{}) {
if c.Verbose {
fmt.Fprintf(os.Stderr, msg+"\n", args...)
// Result is returned from the CheckPackage function, and holds all the errors
// that were found to be unchecked in a package.
//
// Aggregation can be done using the Append method for users that want to
// combine results from multiple packages.
type Result struct {
// UncheckedErrors is a list of all the unchecked errors in the package.
// Printing an error reports its position within the file and the contents of the line.
UncheckedErrors []UncheckedError
}
type byName []UncheckedError
// Less reports whether the element with index i should sort before the element with index j.
func (b byName) Less(i, j int) bool {
ei, ej := b[i], b[j]
pi, pj := ei.Pos, ej.Pos
if pi.Filename != pj.Filename {
return pi.Filename < pj.Filename
}
if pi.Line != pj.Line {
return pi.Line < pj.Line
}
if pi.Column != pj.Column {
return pi.Column < pj.Column
}
return ei.Line < ej.Line
}
func (b byName) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
func (b byName) Len() int {
return len(b)
}
// Append appends errors to r. Append does not do any duplicate checking.
func (r *Result) Append(other Result) {
r.UncheckedErrors = append(r.UncheckedErrors, other.UncheckedErrors...)
}
// Returns the unique errors that have been accumulated. Duplicates may occur
// when a file containing an unchecked error belongs to > 1 package.
//
// The method receiver remains unmodified after the call to Unique.
func (r Result) Unique() Result {
result := make([]UncheckedError, len(r.UncheckedErrors))
copy(result, r.UncheckedErrors)
sort.Sort((byName)(result))
uniq := result[:0] // compact in-place
for i, err := range result {
if i == 0 || err != result[i-1] {
uniq = append(uniq, err)
}
}
return Result{UncheckedErrors: uniq}
}
// Exclusions define symbols and language elements that will be not checked
type Exclusions struct {
// Packages lists paths of excluded packages.
Packages []string
// SymbolRegexpsByPackage maps individual package paths to regular
// expressions that match symbols to be excluded.
//
// Packages whose paths appear both here and in Packages list will
// be excluded entirely.
//
// This is a legacy input that will be deprecated in errcheck version 2 and
// should not be used.
SymbolRegexpsByPackage map[string]*regexp.Regexp
// Symbols lists patterns that exclude individual package symbols.
//
// For example:
//
// "fmt.Errorf" // function
// "fmt.Fprintf(os.Stderr)" // function with set argument value
// "(hash.Hash).Write" // method
//
Symbols []string
// TestFiles excludes _test.go files.
TestFiles bool
// GeneratedFiles excludes generated source files.
//
// Source file is assumed to be generated if its contents
// match the following regular expression:
//
// ^// Code generated .* DO NOT EDIT\\.$
//
GeneratedFiles bool
// BlankAssignments ignores assignments to blank identifier.
BlankAssignments bool
// TypeAssertions ignores unchecked type assertions.
TypeAssertions bool
}
// Checker checks that you checked errors.
type Checker struct {
// Exclusions defines code packages, symbols, and other elements that will not be checked.
Exclusions Exclusions
// Tags are a list of build tags to use.
Tags []string
// The mod flag for go build.
Mod string
}
// loadPackages is used for testing.
@ -177,19 +197,26 @@ var loadPackages = func(cfg *packages.Config, paths ...string) ([]*packages.Pack
return packages.Load(cfg, paths...)
}
func (c *Checker) load(paths ...string) ([]*packages.Package, error) {
// LoadPackages loads all the packages in all the paths provided. It uses the
// exclusions and build tags provided to by the user when loading the packages.
func (c *Checker) LoadPackages(paths ...string) ([]*packages.Package, error) {
buildFlags := []string{fmtTags(c.Tags)}
if c.Mod != "" {
buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", c.Mod))
}
cfg := &packages.Config{
Mode: packages.LoadAllSyntax,
Tests: !c.WithoutTests,
BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(c.Tags, " "))},
Tests: !c.Exclusions.TestFiles,
BuildFlags: buildFlags,
}
return loadPackages(cfg, paths...)
}
var generatedCodeRegexp = regexp.MustCompile("^// Code generated .* DO NOT EDIT\\.$")
var dotStar = regexp.MustCompile(".*")
func (c *Checker) shouldSkipFile(file *ast.File) bool {
if !c.WithoutGeneratedCode {
if !c.Exclusions.GeneratedFiles {
return false
}
@ -204,63 +231,47 @@ func (c *Checker) shouldSkipFile(file *ast.File) bool {
return false
}
// CheckPackages checks packages for errors.
func (c *Checker) CheckPackages(paths ...string) error {
pkgs, err := c.load(paths...)
if err != nil {
return err
// CheckPackage checks packages for errors that have not been checked.
//
// It will exclude specific errors from analysis if the user has configured
// exclusions.
func (c *Checker) CheckPackage(pkg *packages.Package) Result {
excludedSymbols := map[string]bool{}
for _, sym := range c.Exclusions.Symbols {
excludedSymbols[sym] = true
}
// Check for errors in the initial packages.
for _, pkg := range pkgs {
if len(pkg.Errors) > 0 {
return fmt.Errorf("errors while loading package %s: %v", pkg.ID, pkg.Errors)
ignore := map[string]*regexp.Regexp{}
// Apply SymbolRegexpsByPackage first so that if the same path appears in
// Packages, a more narrow regexp will be superseded by dotStar below.
if regexps := c.Exclusions.SymbolRegexpsByPackage; regexps != nil {
for pkg, re := range regexps {
// TODO warn if previous entry overwritten?
ignore[nonVendoredPkgPath(pkg)] = re
}
}
var wg sync.WaitGroup
u := &UncheckedErrors{}
for _, pkg := range pkgs {
wg.Add(1)
go func(pkg *packages.Package) {
defer wg.Done()
c.logf("Checking %s", pkg.Types.Path())
v := &visitor{
pkg: pkg,
ignore: c.Ignore,
blank: c.Blank,
asserts: c.Asserts,
lines: make(map[string][]string),
exclude: c.exclude,
errors: []UncheckedError{},
}
for _, astFile := range v.pkg.Syntax {
if c.shouldSkipFile(astFile) {
continue
}
ast.Walk(v, astFile)
}
u.Append(v.errors...)
}(pkg)
for _, pkg := range c.Exclusions.Packages {
// TODO warn if previous entry overwritten?
ignore[nonVendoredPkgPath(pkg)] = dotStar
}
wg.Wait()
if u.Len() > 0 {
// Sort unchecked errors and remove duplicates. Duplicates may occur when a file
// containing an unchecked error belongs to > 1 package.
sort.Sort(byName{u})
uniq := u.Errors[:0] // compact in-place
for i, err := range u.Errors {
if i == 0 || err != u.Errors[i-1] {
uniq = append(uniq, err)
}
v := &visitor{
pkg: pkg,
ignore: ignore,
blank: !c.Exclusions.BlankAssignments,
asserts: !c.Exclusions.TypeAssertions,
lines: make(map[string][]string),
exclude: excludedSymbols,
errors: []UncheckedError{},
}
for _, astFile := range v.pkg.Syntax {
if c.shouldSkipFile(astFile) {
continue
}
u.Errors = uniq
return u
ast.Walk(v, astFile)
}
return nil
return Result{UncheckedErrors: v.errors}
}
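
Put together, the reworked API splits loading, per-package checking, and aggregation into separate steps. The following is a hedged sketch (not part of the vendored file) of a caller wiring them up; the "./..." pattern and the extra excluded symbol are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/kisielk/errcheck/errcheck"
)

func main() {
	checker := &errcheck.Checker{
		Exclusions: errcheck.Exclusions{
			TestFiles:      true, // skip _test.go files
			GeneratedFiles: true, // skip generated sources
			// The stdlib defaults are no longer implicit; copy them in explicitly.
			Symbols: append([]string{"fmt.Println"}, errcheck.DefaultExcludedSymbols...),
		},
	}

	pkgs, err := checker.LoadPackages("./...")
	if err != nil {
		log.Fatal(err)
	}

	var result errcheck.Result
	for _, pkg := range pkgs {
		result.Append(checker.CheckPackage(pkg))
	}

	for _, e := range result.Unique().UncheckedErrors {
		fmt.Printf("%s:\t%s\t%s\n", e.Pos, e.FuncName, e.Line)
	}
}
```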
// visitor implements the errcheck algorithm
@ -433,32 +444,24 @@ func (v *visitor) ignoreCall(call *ast.CallExpr) bool {
if obj := v.pkg.TypesInfo.Uses[id]; obj != nil {
if pkg := obj.Pkg(); pkg != nil {
if re, ok := v.ignore[pkg.Path()]; ok {
if re, ok := v.ignore[nonVendoredPkgPath(pkg.Path())]; ok {
return re.MatchString(id.Name)
}
// if current package being considered is vendored, check to see if it should be ignored based
// on the unvendored path.
if nonVendoredPkg, ok := nonVendoredPkgPath(pkg.Path()); ok {
if re, ok := v.ignore[nonVendoredPkg]; ok {
return re.MatchString(id.Name)
}
}
}
}
return false
}
// nonVendoredPkgPath returns the unvendored version of the provided package path (or returns the provided path if it
// does not represent a vendored path). The second return value is true if the provided package was vendored, false
// otherwise.
func nonVendoredPkgPath(pkgPath string) (string, bool) {
// nonVendoredPkgPath returns the unvendored version of the provided package
// path (or returns the provided path if it does not represent a vendored
// path).
func nonVendoredPkgPath(pkgPath string) string {
lastVendorIndex := strings.LastIndex(pkgPath, "/vendor/")
if lastVendorIndex == -1 {
return pkgPath, false
return pkgPath
}
return pkgPath[lastVendorIndex+len("/vendor/"):], true
return pkgPath[lastVendorIndex+len("/vendor/"):]
}
// errorsByArg returns a slice s such that

12
vendor/github.com/kisielk/errcheck/errcheck/tags.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// +build go1.13
package errcheck
import (
"fmt"
"strings"
)
func fmtTags(tags []string) string {
return fmt.Sprintf("-tags=%s", strings.Join(tags, ","))
}

View File

@ -0,0 +1,13 @@
// +build go1.11
// +build !go1.13
package errcheck
import (
"fmt"
"strings"
)
func fmtTags(tags []string) string {
return fmt.Sprintf("-tags=%s", strings.Join(tags, " "))
}

View File

@ -1,3 +1,5 @@
module github.com/kisielk/errcheck
require golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563
go 1.14
require golang.org/x/tools v0.0.0-20200619180055-7c47624df98f

View File

@ -1,4 +1,20 @@
golang.org/x/tools v0.0.0-20180803180156-3c07937fe18c h1:Z6Xcg33osoOLCiz/EFPHedK6zJ9nl1KK4WH/LlykMxs=
golang.org/x/tools v0.0.0-20180803180156-3c07937fe18c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563 h1:NIou6eNFigscvKJmsbyez16S2cIS6idossORlFtSt2E=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f h1:tuwaIjfUa6eI6REiNueIxvNm1popyPUnqWga83S7U0o=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -2,15 +2,19 @@ package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"github.com/kisielk/errcheck/internal/errcheck"
"github.com/kisielk/errcheck/errcheck"
"golang.org/x/tools/go/packages"
)
const (
@ -19,10 +23,14 @@ const (
exitFatalError
)
var abspath bool
type ignoreFlag map[string]*regexp.Regexp
// global flags
var (
abspath bool
verbose bool
)
func (f ignoreFlag) String() string {
pairs := make([]string, 0, len(f))
for pkg, re := range f {
@ -61,17 +69,16 @@ func (f ignoreFlag) Set(s string) error {
type tagsFlag []string
func (f *tagsFlag) String() string {
return fmt.Sprintf("%q", strings.Join(*f, " "))
return fmt.Sprintf("%q", strings.Join(*f, ","))
}
func (f *tagsFlag) Set(s string) error {
if s == "" {
return nil
}
tags := strings.Split(s, " ")
if tags == nil {
return nil
}
tags := strings.FieldsFunc(s, func(c rune) bool {
return c == ' ' || c == ','
})
for _, tag := range tags {
if tag != "" {
*f = append(*f, tag)
@ -80,14 +87,12 @@ func (f *tagsFlag) Set(s string) error {
return nil
}
var dotStar = regexp.MustCompile(".*")
func reportUncheckedErrors(e *errcheck.UncheckedErrors, verbose bool) {
func reportResult(e errcheck.Result) {
wd, err := os.Getwd()
if err != nil {
wd = ""
}
for _, uncheckedError := range e.Errors {
for _, uncheckedError := range e.UncheckedErrors {
pos := uncheckedError.Pos.String()
if !abspath {
newPos, err := filepath.Rel(wd, pos)
@ -104,41 +109,87 @@ func reportUncheckedErrors(e *errcheck.UncheckedErrors, verbose bool) {
}
}
func mainCmd(args []string) int {
runtime.GOMAXPROCS(runtime.NumCPU())
func logf(msg string, args ...interface{}) {
if verbose {
fmt.Fprintf(os.Stderr, msg+"\n", args...)
}
}
checker := errcheck.NewChecker()
paths, err := parseFlags(checker, args)
if err != exitCodeOk {
return err
func mainCmd(args []string) int {
var checker errcheck.Checker
paths, rc := parseFlags(&checker, args)
if rc != exitCodeOk {
return rc
}
if err := checker.CheckPackages(paths...); err != nil {
if e, ok := err.(*errcheck.UncheckedErrors); ok {
reportUncheckedErrors(e, checker.Verbose)
return exitUncheckedError
} else if err == errcheck.ErrNoGoFiles {
result, err := checkPaths(&checker, paths...)
if err != nil {
if err == errcheck.ErrNoGoFiles {
fmt.Fprintln(os.Stderr, err)
return exitCodeOk
}
fmt.Fprintf(os.Stderr, "error: failed to check packages: %s\n", err)
return exitFatalError
}
if len(result.UncheckedErrors) > 0 {
reportResult(result)
return exitUncheckedError
}
return exitCodeOk
}
func checkPaths(c *errcheck.Checker, paths ...string) (errcheck.Result, error) {
pkgs, err := c.LoadPackages(paths...)
if err != nil {
return errcheck.Result{}, err
}
// Check for errors in the initial packages.
work := make(chan *packages.Package, len(pkgs))
for _, pkg := range pkgs {
if len(pkg.Errors) > 0 {
return errcheck.Result{}, fmt.Errorf("errors while loading package %s: %v", pkg.ID, pkg.Errors)
}
work <- pkg
}
close(work)
var wg sync.WaitGroup
result := &errcheck.Result{}
mu := &sync.Mutex{}
for i := 0; i < runtime.NumCPU(); i++ {
wg.Add(1)
go func() {
defer wg.Done()
for pkg := range work {
logf("checking %s", pkg.Types.Path())
r := c.CheckPackage(pkg)
mu.Lock()
result.Append(r)
mu.Unlock()
}
}()
}
wg.Wait()
return result.Unique(), nil
}
func parseFlags(checker *errcheck.Checker, args []string) ([]string, int) {
flags := flag.NewFlagSet(args[0], flag.ContinueOnError)
flags.BoolVar(&checker.Blank, "blank", false, "if true, check for errors assigned to blank identifier")
flags.BoolVar(&checker.Asserts, "asserts", false, "if true, check for ignored type assertion results")
flags.BoolVar(&checker.WithoutTests, "ignoretests", false, "if true, checking of _test.go files is disabled")
flags.BoolVar(&checker.WithoutGeneratedCode, "ignoregenerated", false, "if true, checking of files with generated code is disabled")
flags.BoolVar(&checker.Verbose, "verbose", false, "produce more verbose logging")
var checkAsserts, checkBlanks bool
flags.BoolVar(&checkBlanks, "blank", false, "if true, check for errors assigned to blank identifier")
flags.BoolVar(&checkAsserts, "asserts", false, "if true, check for ignored type assertion results")
flags.BoolVar(&checker.Exclusions.TestFiles, "ignoretests", false, "if true, checking of _test.go files is disabled")
flags.BoolVar(&checker.Exclusions.GeneratedFiles, "ignoregenerated", false, "if true, checking of files with generated code is disabled")
flags.BoolVar(&verbose, "verbose", false, "produce more verbose logging")
flags.BoolVar(&abspath, "abspath", false, "print absolute paths to files")
tags := tagsFlag{}
flags.Var(&tags, "tags", "space-separated list of build tags to include")
flags.Var(&tags, "tags", "comma or space-separated list of build tags to include")
ignorePkg := flags.String("ignorepkg", "", "comma-separated list of package paths to ignore")
ignore := ignoreFlag(map[string]*regexp.Regexp{})
flags.Var(ignore, "ignore", "[deprecated] comma-separated list of pairs of the form pkg:regex\n"+
@ -147,48 +198,75 @@ func parseFlags(checker *errcheck.Checker, args []string) ([]string, int) {
var excludeFile string
flags.StringVar(&excludeFile, "exclude", "", "Path to a file containing a list of functions to exclude from checking")
var excludeOnly bool
flags.BoolVar(&excludeOnly, "excludeonly", false, "Use only excludes from -exclude file")
flags.StringVar(&checker.Mod, "mod", "", "module download mode to use: readonly or vendor. See 'go help modules' for more.")
if err := flags.Parse(args[1:]); err != nil {
return nil, exitFatalError
}
if excludeFile != "" {
exclude := make(map[string]bool)
fh, err := os.Open(excludeFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Could not read exclude file: %s\n", err)
return nil, exitFatalError
}
scanner := bufio.NewScanner(fh)
for scanner.Scan() {
name := scanner.Text()
exclude[name] = true
checker.Exclusions.BlankAssignments = !checkBlanks
checker.Exclusions.TypeAssertions = !checkAsserts
if checker.Verbose {
fmt.Printf("Excluding %s\n", name)
}
}
if err := scanner.Err(); err != nil {
fmt.Fprintf(os.Stderr, "Could not read exclude file: %s\n", err)
if !excludeOnly {
checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, errcheck.DefaultExcludedSymbols...)
}
if excludeFile != "" {
excludes, err := readExcludes(excludeFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Could not read exclude file: %v\n", err)
return nil, exitFatalError
}
checker.SetExclude(exclude)
checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, excludes...)
}
checker.Tags = tags
for _, pkg := range strings.Split(*ignorePkg, ",") {
if pkg != "" {
ignore[pkg] = dotStar
checker.Exclusions.Packages = append(checker.Exclusions.Packages, pkg)
}
}
checker.Ignore = ignore
checker.Exclusions.SymbolRegexpsByPackage = ignore
paths := flags.Args()
if len(paths) == 0 {
paths = []string{"."}
}
return paths, exitCodeOk
}
// readExcludes reads an excludes file, a newline-delimited file that lists
// patterns for which to allow unchecked errors.
func readExcludes(path string) ([]string, error) {
var excludes []string
buf, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(bytes.NewReader(buf))
for scanner.Scan() {
name := scanner.Text()
// Skip comments and empty lines.
if strings.HasPrefix(name, "//") || name == "" {
continue
}
excludes = append(excludes, name)
}
if err := scanner.Err(); err != nil {
return nil, err
}
return excludes, nil
}
func main() {
os.Exit(mainCmd(os.Args))
}
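Taken together, the reworked flow above amounts to: configure Exclusions, LoadPackages, CheckPackage per package, merge with Result.Append, de-duplicate with Unique. A minimal sketch of driving that API directly, using only identifiers that appear in this diff; the "./..." package pattern is illustrative, not part of this commit:

package main

import (
	"fmt"
	"log"

	"github.com/kisielk/errcheck/errcheck"
)

func main() {
	// Configure exclusions the same way parseFlags does above.
	var checker errcheck.Checker
	checker.Exclusions.TestFiles = true
	checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, errcheck.DefaultExcludedSymbols...)

	pkgs, err := checker.LoadPackages("./...")
	if err != nil {
		log.Fatal(err)
	}

	result := &errcheck.Result{}
	for _, pkg := range pkgs {
		// Check each loaded package and merge its findings.
		r := checker.CheckPackage(pkg)
		result.Append(r)
	}

	for _, unchecked := range result.Unique().UncheckedErrors {
		fmt.Println(unchecked.Pos.String())
	}
}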

View File

@ -163,7 +163,7 @@ func (c *counter) updateExemplar(v float64, l Labels) {
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
type CounterVec struct {
*metricVec
*MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@ -176,11 +176,11 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.
return result
}),
@ -188,7 +188,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
}
// GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created.
//
// It is possible to call this method without using the returned Counter to only
@ -202,7 +202,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// Counter with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -211,7 +211,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
@ -219,19 +219,19 @@ func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
}
// GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
@ -275,7 +275,7 @@ func (v *CounterVec) With(labels Labels) Counter {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &CounterVec{vec}, err
}

View File

@ -51,7 +51,7 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
// VariableLabels contains names of labels for which the metric
// variableLabels contains names of labels for which the metric
// maintains variable values.
variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This

View File

@ -132,7 +132,7 @@ func (g *gauge) Write(out *dto.Metric) error {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
*metricVec
*MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@ -145,11 +145,11 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
return result
}),
@ -157,7 +157,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
}
// GetMetricWithLabelValues returns the Gauge for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Gauge is created.
//
// It is possible to call this method without using the returned Gauge to only
@ -172,7 +172,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -180,7 +180,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
@ -188,19 +188,19 @@ func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
}
// GetMetricWith returns the Gauge for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Gauge is created. Implications of
// creating a Gauge without using it and keeping the Gauge for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
@ -244,7 +244,7 @@ func (v *GaugeVec) With(labels Labels) Gauge {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &GaugeVec{vec}, err
}

View File

@ -58,9 +58,10 @@ type goCollector struct {
// collector will use the memstats from a previous collection if
// runtime.ReadMemStats takes more than 1s. However, if there are no previously
// collected memstats, or their collection is more than 5m ago, the collection
// will block until runtime.ReadMemStats succeeds. (The problem might be solved
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
// issue.)
// will block until runtime.ReadMemStats succeeds.
//
// NOTE: The problem is solved in Go 1.15, see
// https://github.com/golang/go/issues/19812 for the related Go issue.
func NewGoCollector() Collector {
return &goCollector{
goroutinesDesc: NewDesc(

View File

@ -192,7 +192,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{{}, {}},
now: time.Now,
}
@ -409,7 +409,7 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
*metricVec
*MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@ -422,14 +422,14 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created.
//
// It is possible to call this method without using the returned Histogram to only
@ -444,7 +444,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -453,7 +453,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
@ -461,19 +461,19 @@ func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error)
}
// GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
@ -517,7 +517,7 @@ func (v *HistogramVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &HistogramVec{vec}, err
}
@ -602,7 +602,7 @@ func NewConstHistogram(
count: count,
sum: sum,
buckets: buckets,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}

View File

@ -89,7 +89,7 @@ type Opts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
}

View File

@ -15,7 +15,11 @@ package prometheus
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
)
type processCollector struct {
@ -149,3 +153,20 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error)
}
ch <- NewInvalidMetric(desc, err)
}
// NewPidFileFn returns a function that retrieves a pid from the specified file.
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
return func() (int, error) {
content, err := ioutil.ReadFile(pidFilePath)
if err != nil {
return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
}
return pid, nil
}
}
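NewPidFileFn is meant to feed the existing PidFn field of ProcessCollectorOpts; a minimal sketch of that wiring (the pid file path and listen address are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// The pid is re-read from the file on every scrape.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		PidFn: prometheus.NewPidFileFn("/var/run/myapp.pid"),
	}))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}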

View File

@ -99,7 +99,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
}
if opts.Registry != nil {
// Initialize all possibilites that can occur below.
// Initialize all possibilities that can occur below.
errCnt.WithLabelValues("gathering")
errCnt.WithLabelValues("encoding")
if err := opts.Registry.Register(errCnt); err != nil {
@ -303,8 +303,12 @@ type Logger interface {
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
// ErrorLog specifies an optional logger for errors collecting and
// serving metrics. If nil, errors are not logged at all.
// ErrorLog specifies an optional Logger for errors collecting and
// serving metrics. If nil, errors are not logged at all. Note that the
// type of a reported error is often prometheus.MultiError, which
// formats into a multi-line error string. If you want to avoid the
// latter, create a Logger implementation that detects a
// prometheus.MultiError and formats the contained errors into one line.
ErrorLog Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling provided ErrorLog

View File

@ -43,14 +43,14 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// InstrumentHandlerDuration is a middleware that wraps the provided
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request duration in seconds. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
// The ObserverVec must have valid metric and label names and must have zero,
// one, or two non-const non-curried labels. For those, the only allowed label
// names are "code" and "method". The function panics otherwise. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -79,12 +79,13 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
}
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
// to observe the request result with the provided CounterVec. The CounterVec
// must have zero, one, or two non-const non-curried labels. For those, the only
// allowed label names are "code" and "method". The function panics
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
// HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
// to observe the request result with the provided CounterVec. The CounterVec
// must have valid metric and label names and must have zero, one, or two
// non-const non-curried labels. For those, the only allowed label names are
// "code" and "method". The function panics otherwise. Partitioning of the
// CounterVec happens by HTTP status code and/or HTTP method if the respective
// instance label names are present in the CounterVec. For unpartitioned
// counting, use a CounterVec with zero labels.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -110,14 +111,15 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
// http.Handler to observe with the provided ObserverVec the request duration
// until the response headers are written. The ObserverVec must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
// until the response headers are written. The ObserverVec must have valid
// metric and label names and must have zero, one, or two non-const non-curried
// labels. For those, the only allowed label names are "code" and "method". The
// function panics otherwise. The Observe method of the Observer in the
// ObserverVec is called with the request duration in seconds. Partitioning
// happens by HTTP status code and/or HTTP method if the respective instance
// label names are present in the ObserverVec. For unpartitioned observations,
// use an ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler panics before calling WriteHeader, no value is
// reported.
@ -139,15 +141,15 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the request size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -174,15 +176,15 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the response size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the response size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
@ -198,6 +200,11 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
})
}
// checkLabels returns whether the provided Collector has a non-const,
// non-curried label named "code" and/or "method". It panics if the provided
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
func checkLabels(c prometheus.Collector) (code bool, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
@ -225,6 +232,10 @@ func checkLabels(c prometheus.Collector) (code bool, method bool) {
close(descc)
// Make sure the Collector has a valid Desc by registering it with a
// temporary registry.
prometheus.NewRegistry().MustRegister(c)
// Create a ConstMetric with the Desc. Since we don't know how many
// variable labels there are, try for as long as it needs.
for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
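A sketch of chaining the middlewares documented above; the metric names are illustrative, and both vectors use only the permitted "code" and "method" labels:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reqDuration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request duration in seconds.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	reqTotal := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests.",
	}, []string{"code", "method"})
	prometheus.MustRegister(reqDuration, reqTotal)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Each middleware partitions by status code and method because those
	// instance labels are present on the vectors.
	http.Handle("/", promhttp.InstrumentHandlerDuration(reqDuration,
		promhttp.InstrumentHandlerCounter(reqTotal, hello)))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}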

View File

@ -215,6 +215,8 @@ func (err AlreadyRegisteredError) Error() string {
// by a Gatherer to report multiple errors during MetricFamily gathering.
type MultiError []error
// Error formats the contained errors as a bullet point list, preceded by the
// total number of errors. Note that this results in a multi-line string.
func (errs MultiError) Error() string {
if len(errs) == 0 {
return ""

View File

@ -110,7 +110,7 @@ type SummaryOpts struct {
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective
@ -208,7 +208,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
desc: desc,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
@ -221,7 +221,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
objectives: opts.Objectives,
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
hotBuf: make([]float64, 0, opts.BufCap),
coldBuf: make([]float64, 0, opts.BufCap),
@ -513,7 +513,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
*metricVec
*MetricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@ -535,14 +535,14 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Summary for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Summary is created.
//
// It is possible to call this method without using the returned Summary to only
@ -557,7 +557,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@ -566,7 +566,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
@ -574,19 +574,19 @@ func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
}
// GetMetricWith returns the Summary for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Summary is created. Implications of
// creating a Summary without using it and keeping the Summary for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
@ -630,7 +630,7 @@ func (v *SummaryVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &SummaryVec{vec}, err
}
@ -716,7 +716,7 @@ func NewConstSummary(
count: count,
sum: sum,
quantiles: quantiles,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}

View File

@ -63,7 +63,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
desc: desc,
valType: valueType,
function: function,
labelPairs: makeLabelPairs(desc, nil),
labelPairs: MakeLabelPairs(desc, nil),
}
result.init(result)
return result
@ -95,7 +95,7 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
desc: desc,
valType: valueType,
val: value,
labelPairs: makeLabelPairs(desc, labelValues),
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}
@ -145,7 +145,14 @@ func populateMetric(
return nil
}
func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
// variable and constant labels in the provided Desc. The values for the
// variable labels are defined by the labelValues slice, which must be in the
// same order as the corresponding variable labels in the Desc.
//
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.

View File

@ -20,12 +20,20 @@ import (
"github.com/prometheus/common/model"
)
// metricVec is a Collector to bundle metrics of the same name that differ in
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
// It also handles label currying.
type metricVec struct {
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
// used for custom Metric implementations.
//
// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
// FooVec and initialize it with NewMetricVec. Implement wrappers for
// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
// add the convenience methods WithLabelValues, With, and MustCurryWith, which
// panic instead of returning errors. See also the MetricVec example.
type MetricVec struct {
*metricMap
curry []curriedLabelValue
@ -35,9 +43,9 @@ type metricVec struct {
hashAddByte func(h uint64, b byte) uint64
}
// newMetricVec returns an initialized metricVec.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
return &metricVec{
// NewMetricVec returns an initialized MetricVec.
func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
return &MetricVec{
metricMap: &metricMap{
metrics: map[uint64][]metricWithLabelValues{},
desc: desc,
@ -63,7 +71,7 @@ func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@ -82,7 +90,7 @@ func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *metricVec) Delete(labels Labels) bool {
func (m *MetricVec) Delete(labels Labels) bool {
h, err := m.hashLabels(labels)
if err != nil {
return false
@ -95,15 +103,32 @@ func (m *metricVec) Delete(labels Labels) bool {
// show up in GoDoc.
// Describe implements Collector.
func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
// Collect implements Collector.
func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
// Reset deletes all metrics in this vector.
func (m *metricVec) Reset() { m.metricMap.Reset() }
func (m *MetricVec) Reset() { m.metricMap.Reset() }
func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the MetricVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
//
// Note that CurryWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
var (
newCurry []curriedLabelValue
oldCurry = m.curry
@ -128,7 +153,7 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
}
return &metricVec{
return &MetricVec{
metricMap: m.metricMap,
curry: newCurry,
hashAdd: m.hashAdd,
@ -136,7 +161,34 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
}, nil
}
func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
// GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created (by
// calling the newMetric function provided during construction of the
// MetricVec).
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it in its initial state.
//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the
// Metric will still exist, but it will not be exported anymore, even if a
// Metric with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
//
// Note that GetMetricWithLabelValues is usually not called directly but through
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@ -145,7 +197,23 @@ func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
// GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of
// creating a Metric without using it and keeping the Metric for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
//
// Note that GetMetricWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@ -154,7 +222,7 @@ func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err
}
@ -177,7 +245,7 @@ func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
return h, nil
}
func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err
}
@ -276,7 +344,9 @@ func (m *metricMap) deleteByHashWithLabelValues(
}
if len(metrics) > 1 {
old := metrics
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
old[len(old)-1] = metricWithLabelValues{}
} else {
delete(m.metrics, h)
}
@ -302,7 +372,9 @@ func (m *metricMap) deleteByHashWithLabels(
}
if len(metrics) > 1 {
old := metrics
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
old[len(old)-1] = metricWithLabelValues{}
} else {
delete(m.metrics, h)
}
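Currying through the now-exported MetricVec.CurryWith is usually reached via the typed wrappers such as CounterVec.MustCurryWith; a small sketch (metric and label names are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	httpReqs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests.",
	}, []string{"handler", "method", "code"})
	prometheus.MustRegister(httpReqs)

	// Pre-set the "handler" label once; the curried vector shares its
	// metrics with the uncurried one.
	apiReqs := httpReqs.MustCurryWith(prometheus.Labels{"handler": "/api"})

	// The remaining variable labels keep their original order: method, code.
	apiReqs.WithLabelValues("GET", "200").Inc()
	apiReqs.With(prometheus.Labels{"method": "POST", "code": "500"}).Inc()

	// The curried and uncurried vectors share children, so this reads the
	// increment made through apiReqs for GET/200.
	fmt.Println(testutil.ToFloat64(httpReqs.WithLabelValues("/api", "GET", "200")))
}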

View File

@ -32,7 +32,9 @@ import (
// in a no-op Registerer.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics exposed.
// Collectors. It should not be used to add fixed labels to all metrics
// exposed. See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
//
// Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be

View File

@ -164,7 +164,7 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error {
}
// ExtractSamples builds a slice of samples from the provided metric
// families. If an error occurrs during sample extraction, it continues to
// families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last
// error that has occurred.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {

View File

@ -299,6 +299,17 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
return nil
}
// Check for duplicate label names.
labels := make(map[string]struct{})
for _, l := range p.currentMetric.Label {
lName := l.GetName()
if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{}
} else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
return nil
}
}
return p.startLabelValue
}
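A short sketch of the new duplicate-label check in action, via the existing TextParser entry point; the sample exposition is made up:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	var parser expfmt.TextParser
	// Two occurrences of the "code" label on one sample are now rejected
	// while parsing rather than producing a metric with duplicate label
	// pairs.
	_, err := parser.TextToMetricFamilies(strings.NewReader(
		"requests_total{code=\"200\",code=\"500\"} 3\n"))
	fmt.Println(err)
}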

View File

@ -20,7 +20,7 @@ const (
prime64 = 1099511628211
)
// hashNew initializies a new fnv64a hash value.
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}

View File

@ -181,77 +181,89 @@ func (d *Duration) Type() string {
return "duration"
}
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
// Allow 0 without a unit.
if durationStr == "0" {
switch durationStr {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
return 0, fmt.Errorf("empty duration string")
}
matches := durationRE.FindStringSubmatch(durationStr)
if len(matches) != 3 {
if matches == nil {
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
}
var (
n, _ = strconv.Atoi(matches[1])
dur = time.Duration(n) * time.Millisecond
)
switch unit := matches[2]; unit {
case "y":
dur *= 1000 * 60 * 60 * 24 * 365
case "w":
dur *= 1000 * 60 * 60 * 24 * 7
case "d":
dur *= 1000 * 60 * 60 * 24
case "h":
dur *= 1000 * 60 * 60
case "m":
dur *= 1000 * 60
case "s":
dur *= 1000
case "ms":
// Value already correct
default:
return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
var dur time.Duration
// Parse the match at pos `pos` in the regex and use `mult` to turn that
// into ms, then add that value to the total parsed duration.
m := func(pos int, mult time.Duration) {
if matches[pos] == "" {
return
}
n, _ := strconv.Atoi(matches[pos])
d := time.Duration(n) * time.Millisecond
dur += d * mult
}
m(2, 1000*60*60*24*365) // y
m(4, 1000*60*60*24*7) // w
m(6, 1000*60*60*24) // d
m(8, 1000*60*60) // h
m(10, 1000*60) // m
m(12, 1000) // s
m(14, 1) // ms
return Duration(dur), nil
}
func (d Duration) String() string {
var (
ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms"
ms = int64(time.Duration(d) / time.Millisecond)
r = ""
)
if ms == 0 {
return "0s"
}
factors := map[string]int64{
"y": 1000 * 60 * 60 * 24 * 365,
"w": 1000 * 60 * 60 * 24 * 7,
"d": 1000 * 60 * 60 * 24,
"h": 1000 * 60 * 60,
"m": 1000 * 60,
"s": 1000,
"ms": 1,
f := func(unit string, mult int64, exact bool) {
if exact && ms%mult != 0 {
return
}
if v := ms / mult; v > 0 {
r += fmt.Sprintf("%d%s", v, unit)
ms -= v * mult
}
}
switch int64(0) {
case ms % factors["y"]:
unit = "y"
case ms % factors["w"]:
unit = "w"
case ms % factors["d"]:
unit = "d"
case ms % factors["h"]:
unit = "h"
case ms % factors["m"]:
unit = "m"
case ms % factors["s"]:
unit = "s"
}
return fmt.Sprintf("%v%v", ms/factors[unit], unit)
// Only format years and weeks if the remainder is zero, as it is often
// easier to read 90d than 12w6d.
f("y", 1000*60*60*24*365, true)
f("w", 1000*60*60*24*7, true)
f("d", 1000*60*60*24, false)
f("h", 1000*60*60, false)
f("m", 1000*60, false)
f("s", 1000, false)
f("ms", 1, false)
return r
}
// MarshalText implements the encoding.TextMarshaler interface.
func (d *Duration) MarshalText() ([]byte, error) {
return []byte(d.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (d *Duration) UnmarshalText(text []byte) error {
var err error
*d, err = ParseDuration(string(text))
return err
}
// MarshalYAML implements the yaml.Marshaler interface.
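A small sketch of what the widened duration grammar accepts; the expected output in the comments follows the String logic shown above:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Compound durations are now valid.
	d, err := model.ParseDuration("1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1h30m

	// Years and weeks are only emitted when they divide evenly, so 90 days
	// stays "90d" rather than "12w6d".
	d, _ = model.ParseDuration("90d")
	fmt.Println(d) // 90d
}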

View File

@ -9,6 +9,7 @@ go_library(
"cpuinfo_armx.go",
"cpuinfo_mipsx.go",
"cpuinfo_ppcx.go",
"cpuinfo_riscvx.go",
"cpuinfo_s390x.go",
"cpuinfo_x86.go",
"crypto.go",
@ -24,8 +25,11 @@ go_library(
"mountstats.go",
"net_conntrackstat.go",
"net_dev.go",
"net_ip_socket.go",
"net_protocols.go",
"net_sockstat.go",
"net_softnet.go",
"net_tcp.go",
"net_udp.go",
"net_unix.go",
"proc.go",
@ -41,6 +45,7 @@ go_library(
"proc_stat.go",
"proc_status.go",
"schedstat.go",
"slab.go",
"stat.go",
"swaps.go",
"vm.go",

View File

@ -78,7 +78,7 @@ ifneq ($(shell which gotestsum),)
endif
endif
PROMU_VERSION ?= 0.5.0
PROMU_VERSION ?= 0.7.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
@ -245,10 +245,12 @@ common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:

6
vendor/github.com/prometheus/procfs/SECURITY.md generated vendored Normal file
View File

@ -0,0 +1,6 @@
# Reporting a security issue
The Prometheus security policy, including how to report vulnerabilities, can be
found here:
https://prometheus.io/docs/operating/security/

View File

@ -36,7 +36,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@ -59,7 +59,7 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
}
entries = append(entries, entry)
} else {

View File

@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
}
}

View File

@ -19,6 +19,7 @@ import (
"bufio"
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
@ -77,7 +78,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@ -192,7 +193,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@ -256,7 +257,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@ -281,7 +282,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
return nil, errors.New("Invalid line found in cpuinfo: " + line)
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
@ -313,6 +314,22 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
return nil, err
}
cpuinfo[i].CPUMHz = v
case "physical id":
cpuinfo[i].PhysicalID = field[1]
case "core id":
cpuinfo[i].CoreID = field[1]
case "cpu cores":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].CPUCores = uint(v)
case "siblings":
v, err := strconv.ParseUint(field[1], 0, 32)
if err != nil {
return nil, err
}
cpuinfo[i].Siblings = uint(v)
}
}
@ -325,7 +342,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@ -367,7 +384,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@ -412,7 +429,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, errors.New("invalid cpuinfo file: " + firstLine)
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)

19
vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux
// +build riscv riscv64

package procfs

var parseCPUInfo = parseCPUInfoRISCV
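The new file above is the RISC-V shim that wires the architecture-specific parser into parseCPUInfo via build constraints: the two // +build lines are ANDed, and the values on one line are ORed, so this file only compiles for linux on riscv or riscv64. The same pattern exists for the other architectures; a sketch of such a shim, with the file name and GOARCH values chosen purely for illustration:

// cpuinfo_examplex.go — hypothetical shim, not part of this commit.

// +build linux
// +build mips mips64

package procfs

// Selects the MIPS parser when building for linux/mips or linux/mips64.
var parseCPUInfo = parseCPUInfoMips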

View File

@ -55,12 +55,12 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
}
return crypto, nil

View File

@ -111,7 +111,7 @@ Max core file size 0 unlimited bytes
Max resident set unlimited unlimited bytes
Max processes 62898 62898 processes
Max open files 2048 4096 files
Max locked memory 65536 65536 bytes
Max locked memory 18446744073708503040 18446744073708503040 bytes
Max address space 8589934592 unlimited bytes
Max file locks unlimited unlimited locks
Max pending signals 62898 62898 signals
@ -1080,7 +1080,6 @@ internal : yes
type : skcipher
async : yes
blocksize : 1
min keysize : 16
max keysize : 32
ivsize : 16
chunksize : 16
@ -1839,6 +1838,7 @@ min keysize : 16
max keysize : 32
Mode: 444
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/diskstats
Lines: 52
@ -2129,6 +2129,24 @@ Lines: 6
4 1FB3C 0 1282A8F 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/protocols
Lines: 14
protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em
PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y
UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n
UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y
NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/proc/net/rpc
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@ -2186,10 +2204,25 @@ Lines: 1
00015c73 00020e76 F0000769 00000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/tcp
Lines: 4
sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/tcp6
Lines: 3
sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp
Lines: 4
sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
@ -2292,6 +2325,312 @@ Mode: 644
Path: fixtures/proc/self
SymlinkTo: 26231
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/slabinfo
Lines: 302
slabinfo - version: 2.1
# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0
pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0
nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0
kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0
kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0
pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0
x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0
iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0
bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0
bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0
fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0
fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0
squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0
xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0
xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0
nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0
nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0
reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0
btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0
ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0
ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0
ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0
ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0
ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0
ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0
jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0
jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0
jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0
jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0
jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0
mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0
dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0
kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0
io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0
dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0
scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0
virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0
RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0
UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0
UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0
tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0
TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0
uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0
bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0
mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0
isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0
io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0
aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0
dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0
posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0
iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0
iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0
UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0
ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0
ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0
ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0
ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0
PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0
UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0
tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0
request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0
TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0
hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0
dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0
eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0
inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0
scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0
request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0
blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0
bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0
biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0
biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0
biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0
biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0
ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0
uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0
audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0
sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0
skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0
skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0
configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0
file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0
file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0
fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0
net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0
task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0
taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0
proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0
pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0
proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0
seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0
sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0
kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0
kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0
mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0
inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0
dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0
names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0
hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0
lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0
key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0
uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0
mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0
fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0
files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0
signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0
sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0
task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0
cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0
anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0
anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0
pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0
Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0
Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0
Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0
Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0
trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0
ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0
pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0
radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0
task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0
dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0
kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0
kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0
kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0
kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0
kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0
kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0
kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0
kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0
kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0
kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0
kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0
kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0
kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0
kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0
kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0
kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/stat
Lines: 16
cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
@ -4639,6 +4978,35 @@ Mode: 644
Directory: fixtures/sys/devices/system
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/node
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/node/node1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/devices/system/node/node1/vmstat
Lines: 6
nr_free_pages 1
nr_zone_inactive_anon 2
nr_zone_active_anon 3
nr_zone_inactive_file 4
nr_zone_active_file 5
nr_zone_unevictable 6
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/node/node2
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/devices/system/node/node2/vmstat
Lines: 6
nr_free_pages 7
nr_zone_inactive_anon 8
nr_zone_active_anon 9
nr_zone_inactive_file 10
nr_zone_active_file 11
nr_zone_unevictable 12
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/clocksource
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
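The fixture additions above (proc/net/protocols, proc/net/tcp, proc/net/tcp6, proc/net/udp, proc/slabinfo and the per-node vmstat files) are the canned inputs the package tests parse. Once the ttar archive is extracted, the same files can also be read directly by pointing the filesystem constructor at the fixtures instead of the live /proc; a rough sketch, assuming this procfs version exposes the NetTCP accessor (imports as in the first example):

	fs, err := procfs.NewFS("fixtures/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Reads fixtures/proc/net/tcp instead of the real /proc/net/tcp.
	conns, err := fs.NetTCP()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d TCP sockets in the fixture", len(conns))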

View File

@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err)
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
}
return *m, nil

View File

@ -1,9 +1,9 @@
module github.com/prometheus/procfs
go 1.12
go 1.13
require (
github.com/google/go-cmp v0.3.1
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
github.com/google/go-cmp v0.5.4
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)

View File

@ -1,6 +1,8 @@
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -39,10 +39,10 @@ type FS string
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
}
return FS(mountPoint), nil

View File

@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
return nil, fmt.Errorf("could not parse load %q: %w", load, err)
}
}
return &LoadAvg{
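The parsed values above surface through the package's load-average accessor; a minimal sketch (imports as in the first example, and assuming the exported method is fs.LoadAvg, as in the upstream package):

	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	avg, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err) // parse failures now wrap the strconv error via %w
	}
	log.Printf("load1=%.2f load5=%.2f load15=%.2f", avg.Load1, avg.Load5, avg.Load15)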

Some files were not shown because too many files have changed in this diff.