mirror of https://github.com/kubernetes/kops.git

Update dependencies

parent 29510977de
commit d5007b95f1

go.mod (68 changed lines)
@@ -14,7 +14,7 @@ require (
  github.com/Masterminds/sprig/v3 v3.2.2
  github.com/apparentlymart/go-cidr v1.1.0
  github.com/aws/amazon-ec2-instance-selector/v2 v2.4.0
- github.com/aws/aws-sdk-go v1.44.130
+ github.com/aws/aws-sdk-go v1.44.135
  github.com/blang/semver/v4 v4.0.0
  github.com/cert-manager/cert-manager v1.10.0
  github.com/digitalocean/godo v1.89.0
@@ -22,52 +22,52 @@ require (
  github.com/go-logr/logr v1.2.3
  github.com/gogo/protobuf v1.3.2
  github.com/google/go-cmp v0.5.9
- github.com/google/go-containerregistry v0.12.0
+ github.com/google/go-containerregistry v0.12.1
  github.com/google/go-tpm v0.3.3
  github.com/google/go-tpm-tools v0.3.9
  github.com/google/uuid v1.3.0
  github.com/gophercloud/gophercloud v1.0.0
- github.com/hashicorp/hcl/v2 v2.14.1
+ github.com/hashicorp/hcl/v2 v2.15.0
  github.com/hashicorp/vault/api v1.8.2
- github.com/hetznercloud/hcloud-go v1.35.3
+ github.com/hetznercloud/hcloud-go v1.37.0
  github.com/jacksontj/memberlistmesh v0.0.0-20190905163944-93462b9d2bb7
  github.com/mitchellh/mapstructure v1.5.0
  github.com/pelletier/go-toml v1.9.5
  github.com/pkg/sftp v1.13.5
- github.com/prometheus/client_golang v1.13.1
- github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9
+ github.com/prometheus/client_golang v1.14.0
+ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10
  github.com/sergi/go-diff v1.2.0
  github.com/spf13/cobra v1.6.1
  github.com/spf13/pflag v1.0.5
- github.com/spf13/viper v1.13.0
+ github.com/spf13/viper v1.14.0
  github.com/spotinst/spotinst-sdk-go v1.131.1
  github.com/stretchr/testify v1.8.1
  github.com/weaveworks/mesh v0.0.0-20191105120815-58dbcc3e8e63
- github.com/zclconf/go-cty v1.12.0
+ github.com/zclconf/go-cty v1.12.1
  go.uber.org/multierr v1.8.0
- golang.org/x/crypto v0.1.0
- golang.org/x/net v0.1.0
- golang.org/x/oauth2 v0.1.0
+ golang.org/x/crypto v0.2.0
+ golang.org/x/net v0.2.0
+ golang.org/x/oauth2 v0.2.0
  golang.org/x/sync v0.1.0
- golang.org/x/sys v0.1.0
- google.golang.org/api v0.102.0
+ golang.org/x/sys v0.2.0
+ google.golang.org/api v0.103.0
  gopkg.in/gcfg.v1 v1.2.3
  gopkg.in/inf.v0 v0.9.1
  gopkg.in/square/go-jose.v2 v2.6.0
- helm.sh/helm/v3 v3.10.1
- k8s.io/api v0.25.3
- k8s.io/apimachinery v0.25.3
- k8s.io/cli-runtime v0.25.3
- k8s.io/client-go v0.25.3
+ helm.sh/helm/v3 v3.10.2
+ k8s.io/api v0.25.4
+ k8s.io/apimachinery v0.25.4
+ k8s.io/cli-runtime v0.25.4
+ k8s.io/client-go v0.25.4
  k8s.io/cloud-provider-aws v1.25.1
- k8s.io/component-base v0.25.3
+ k8s.io/component-base v0.25.4
  k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9
  k8s.io/klog/v2 v2.80.1
- k8s.io/kubectl v0.25.3
- k8s.io/kubelet v0.25.3
- k8s.io/legacy-cloud-providers v0.25.3
- k8s.io/mount-utils v0.25.3
- k8s.io/utils v0.0.0-20221101230645-61b03e2f6476
+ k8s.io/kubectl v0.25.4
+ k8s.io/kubelet v0.25.4
+ k8s.io/legacy-cloud-providers v0.25.4
+ k8s.io/mount-utils v0.25.4
+ k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2
  sigs.k8s.io/controller-runtime v0.13.1
  sigs.k8s.io/yaml v1.3.0
 )
@@ -90,7 +90,7 @@ require (
  github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
  github.com/agext/levenshtein v1.2.1 // indirect
  github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
- github.com/armon/go-metrics v0.3.10 // indirect
+ github.com/armon/go-metrics v0.4.0 // indirect
  github.com/armon/go-radix v1.0.0 // indirect
  github.com/atotto/clipboard v0.1.4 // indirect
  github.com/beorn7/perks v1.0.1 // indirect
@@ -119,7 +119,7 @@ require (
  github.com/evertras/bubble-table v0.14.4 // indirect
  github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
  github.com/fatih/color v1.13.0 // indirect
- github.com/fsnotify/fsnotify v1.5.4 // indirect
+ github.com/fsnotify/fsnotify v1.6.0 // indirect
  github.com/go-errors/errors v1.0.1 // indirect
  github.com/go-openapi/jsonpointer v0.19.5 // indirect
  github.com/go-openapi/jsonreference v0.19.5 // indirect
@@ -134,7 +134,7 @@ require (
  github.com/google/gofuzz v1.2.0 // indirect
  github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
  github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
- github.com/googleapis/gax-go/v2 v2.6.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.7.0 // indirect
  github.com/gorilla/mux v1.8.0 // indirect
  github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
  github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -204,7 +204,7 @@ require (
  github.com/pierrec/lz4 v2.5.2+incompatible // indirect
  github.com/pkg/errors v0.9.1 // indirect
  github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/client_model v0.3.0 // indirect
  github.com/prometheus/common v0.37.0 // indirect
  github.com/prometheus/procfs v0.8.0 // indirect
  github.com/rivo/uniseg v0.2.0 // indirect
@@ -215,23 +215,23 @@ require (
  github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
  github.com/shopspring/decimal v1.2.0 // indirect
  github.com/sirupsen/logrus v1.9.0 // indirect
- github.com/spf13/afero v1.8.2 // indirect
+ github.com/spf13/afero v1.9.2 // indirect
  github.com/spf13/cast v1.5.0 // indirect
  github.com/spf13/jwalterweatherman v1.1.0 // indirect
  github.com/subosito/gotenv v1.4.1 // indirect
  github.com/vbatts/tar-split v0.11.2 // indirect
  github.com/xlab/treeprint v1.1.0 // indirect
- go.opencensus.io v0.23.0 // indirect
+ go.opencensus.io v0.24.0 // indirect
  go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
  go.uber.org/atomic v1.9.0 // indirect
  golang.org/x/mod v0.6.0 // indirect
- golang.org/x/term v0.1.0 // indirect
+ golang.org/x/term v0.2.0 // indirect
  golang.org/x/text v0.4.0 // indirect
  golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
  golang.org/x/tools v0.1.12 // indirect
  gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
  google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect
+ google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect
  google.golang.org/grpc v1.50.1 // indirect
  google.golang.org/protobuf v1.28.1 // indirect
  gopkg.in/ini.v1 v1.67.0 // indirect
@@ -239,8 +239,8 @@ require (
  gopkg.in/yaml.v2 v2.4.0 // indirect
  gopkg.in/yaml.v3 v3.0.1 // indirect
  k8s.io/apiextensions-apiserver v0.25.2 // indirect
- k8s.io/cloud-provider v0.25.3 // indirect
- k8s.io/csi-translation-lib v0.25.3 // indirect
+ k8s.io/cloud-provider v0.25.4 // indirect
+ k8s.io/csi-translation-lib v0.25.4 // indirect
  k8s.io/klog v1.0.0 // indirect
  k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea // indirect
  oras.land/oras-go v1.2.0 // indirect
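For anyone who wants to compare require blocks like the one above programmatically, here is a minimal Go sketch using golang.org/x/mod/modfile (the same golang.org/x/mod module that already appears in this dependency tree). The hard-coded "go.mod" path and the output format are illustrative assumptions, not part of this commit.

// List the modules a go.mod requires, e.g. to diff two checkouts before/after a bump.
package main

import (
	"fmt"
	"os"

	"golang.org/x/mod/modfile"
)

func main() {
	data, err := os.ReadFile("go.mod") // illustrative path; point it at any checkout
	if err != nil {
		panic(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}
	for _, r := range f.Require {
		// One line per requirement: path, version, and whether it is indirect.
		fmt.Printf("%-60s %s indirect=%v\n", r.Mod.Path, r.Mod.Version, r.Indirect)
	}
}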

go.sum (141 changed lines)

@ -27,6 +27,7 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD
|
|||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
|
@ -39,6 +40,7 @@ cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22m
|
|||
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/longrunning v0.1.1 h1:y50CXG4j0+qvEukslYFBCrzaXX0qpFbBzc3PchSu/LE=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
|
|
@ -121,8 +123,8 @@ github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6
|
|||
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
|
||||
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
|
||||
github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
|
|
@ -131,8 +133,8 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z
|
|||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aws/amazon-ec2-instance-selector/v2 v2.4.0 h1:9l68/pwVYm6EOAeBmoVUL4ekw6VlbwtPyX9/F+IpMxQ=
|
||||
github.com/aws/amazon-ec2-instance-selector/v2 v2.4.0/go.mod h1:AEJrtkLkCkfIBIazidrVrgZqaXl+9dxI/wRgjdw+7G0=
|
||||
github.com/aws/aws-sdk-go v1.44.130 h1:a/qwOxmYJF47xTZvTjECSJXnfRbjegb3YxvCXfETtnY=
|
||||
github.com/aws/aws-sdk-go v1.44.130/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.135 h1:DJJP/CkEpgafA5p5jlY9VzDRyKrfABVixzIxrK/3tWU=
|
||||
github.com/aws/aws-sdk-go v1.44.135/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
|
|
@ -251,8 +253,8 @@ github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM
|
|||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
|
|
@ -353,8 +355,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-containerregistry v0.12.0 h1:nidOEtFYlgPCRqxCKj/4c/js940HVWplCWc5ftdfdUA=
|
||||
github.com/google/go-containerregistry v0.12.0/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
|
||||
github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8=
|
||||
github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/go-tpm v0.1.2-0.20190725015402-ae6dd98980d4/go.mod h1:H9HbmUG2YgV/PHITkO7p6wxEEj/v5nlsVWIwumwH2NI=
|
||||
|
|
@ -403,8 +405,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
|||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
|
||||
github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU=
|
||||
github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gophercloud/gophercloud v1.0.0 h1:9nTGx0jizmHxDobe4mck89FyQHVyA3CaXLIUSGJjP9k=
|
||||
github.com/gophercloud/gophercloud v1.0.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
|
||||
|
|
@ -465,8 +467,8 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l
|
|||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34=
|
||||
github.com/hashicorp/hcl/v2 v2.14.1/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
|
||||
github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8=
|
||||
github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng=
|
||||
github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM=
|
||||
github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
|
||||
|
|
@ -476,8 +478,8 @@ github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZT
|
|||
github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow=
|
||||
github.com/hetznercloud/hcloud-go v1.35.3/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
|
||||
github.com/hetznercloud/hcloud-go v1.37.0 h1:Uwu7OKfZvar86LfJuzItStoO1AL7DVDCqWzRGzrvdEw=
|
||||
github.com/hetznercloud/hcloud-go v1.37.0/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA=
|
||||
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
|
||||
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
|
||||
|
|
@ -666,13 +668,14 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
|
|||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c=
|
||||
github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
|
|
@ -714,8 +717,8 @@ github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkB
|
|||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/sahilm/fuzzy v0.1.0 h1:FzWGaw2Opqyu+794ZQ9SYifWv2EIXpwP4q8dY1kDAwI=
|
||||
github.com/sahilm/fuzzy v0.1.0/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
|
|
@ -732,8 +735,8 @@ github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs
|
|||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=
|
||||
github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
|
||||
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
||||
|
|
@ -750,8 +753,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
|||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU=
|
||||
github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
|
||||
github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
|
||||
github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
|
||||
github.com/spotinst/spotinst-sdk-go v1.131.1 h1:8WGvAc1PKno221aSnzc42goFwgehHw7DKBWve437IyE=
|
||||
github.com/spotinst/spotinst-sdk-go v1.131.1/go.mod h1:C6mrT7+mqOgPyabacjyYTvilu8Xm96mvTvrZQhj99WI=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
|
|
@ -794,11 +797,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
|
||||
github.com/zclconf/go-cty v1.12.0 h1:F5E/vbilcrCtat9sYcEjlwwg1mDqbRTjyXR57nnx5sc=
|
||||
github.com/zclconf/go-cty v1.12.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA=
|
||||
github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY=
|
||||
github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
|
|
@ -806,8 +810,9 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
|
||||
|
|
@ -840,8 +845,8 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5y
|
|||
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE=
|
||||
golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
|
@ -877,6 +882,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
|
@ -931,8 +937,10 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
|
@ -950,8 +958,8 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
|
|||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y=
|
||||
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
|
||||
golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU=
|
||||
golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
|
@ -963,6 +971,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
@ -1041,14 +1050,18 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -1163,8 +1176,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
|
|||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4=
|
||||
google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I=
|
||||
google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
|
||||
google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ=
|
||||
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
|
@ -1233,8 +1246,8 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc
|
|||
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y=
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
|
@ -1317,8 +1330,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
helm.sh/helm/v3 v3.10.1 h1:uTnNlYx8QcTSNA4ZJ50Llwife4CSohUY4ehumyVf2QE=
|
||||
helm.sh/helm/v3 v3.10.1/go.mod h1:CXOcs02AYvrlPMWARNYNRgf2rNP7gLJQsi/Ubd4EDrI=
|
||||
helm.sh/helm/v3 v3.10.2 h1:2PmN9NgmqTn5pswfL5Kh2LxOKjkmh0hxKLe6/J0yUY4=
|
||||
helm.sh/helm/v3 v3.10.2/go.mod h1:CXOcs02AYvrlPMWARNYNRgf2rNP7gLJQsi/Ubd4EDrI=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
@ -1326,24 +1339,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
|
||||
k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
|
||||
k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs=
|
||||
k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ=
|
||||
k8s.io/apiextensions-apiserver v0.25.2 h1:8uOQX17RE7XL02ngtnh3TgifY7EhekpK+/piwzQNnBo=
|
||||
k8s.io/apiextensions-apiserver v0.25.2/go.mod h1:iRwwRDlWPfaHhuBfQ0WMa5skdQfrE18QXJaJvIDLvE8=
|
||||
k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
|
||||
k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
|
||||
k8s.io/cli-runtime v0.25.3 h1:Zs7P7l7db/5J+KDePOVtDlArAa9pZXaDinGWGZl0aM8=
|
||||
k8s.io/cli-runtime v0.25.3/go.mod h1:InHHsjkyW5hQsILJGpGjeruiDZT/R0OkROQgD6GzxO4=
|
||||
k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0=
|
||||
k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA=
|
||||
k8s.io/cloud-provider v0.25.3 h1:1X1BKXm0fp8/ZkaQKNDyWqgh6t7m9O5MDSbO9OA4muk=
|
||||
k8s.io/cloud-provider v0.25.3/go.mod h1:P7TjzjbkqW3C0NAT1bNEZrZRifNNBVhrTb+iHRjfFz0=
|
||||
k8s.io/apimachinery v0.25.4 h1:CtXsuaitMESSu339tfhVXhQrPET+EiWnIY1rcurKnAc=
|
||||
k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
|
||||
k8s.io/cli-runtime v0.25.4 h1:GTSBN7aKBrc2LqpdO30CmHQqJtRmotxV7XsMSP+QZIk=
|
||||
k8s.io/cli-runtime v0.25.4/go.mod h1:JGOw1CR8v4Mcz6cEKA7bFQe0bPrNn1l5sGAX1/Ke4Eg=
|
||||
k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8=
|
||||
k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw=
|
||||
k8s.io/cloud-provider v0.25.4 h1:juIfru1jVX6BlDWcJ18sv6aWxMSMmcjWf2HNXUtnkiI=
|
||||
k8s.io/cloud-provider v0.25.4/go.mod h1:L018fvnYxHrJP93UNSu8ODZYd/HCukliBzzNsV4TqC0=
|
||||
k8s.io/cloud-provider-aws v1.25.1 h1:coux06A4qvG6/IDt5a2YBKl5o9j3YAzT6dArZDUawBA=
|
||||
k8s.io/cloud-provider-aws v1.25.1/go.mod h1:BizrTUsF5lW3esndEaE++FW8P0ENBdAiKptj0BQrTBo=
|
||||
k8s.io/component-base v0.25.3 h1:UrsxciGdrCY03ULT1h/S/gXFCOPnLhUVwSyx+hM/zq4=
|
||||
k8s.io/component-base v0.25.3/go.mod h1:WYoS8L+IlTZgU7rhAl5Ctpw0WdMxDfCC5dkxcEFa/TI=
|
||||
k8s.io/csi-translation-lib v0.25.3 h1:gNHocpHaD4O+QZT31mHFvwC8ZFl5q0Tp7BvhV900HPo=
|
||||
k8s.io/csi-translation-lib v0.25.3/go.mod h1:hrosK8ufTX5fz1CJO79EfPPkuLZWvaxEb4tovbcv/AU=
|
||||
k8s.io/component-base v0.25.4 h1:n1bjg9Yt+G1C0WnIDJmg2fo6wbEU1UGMRiQSjmj7hNQ=
|
||||
k8s.io/component-base v0.25.4/go.mod h1:nnZJU8OP13PJEm6/p5V2ztgX2oyteIaAGKGMYb2L2cY=
|
||||
k8s.io/csi-translation-lib v0.25.4 h1:y8+C2sVIkA9K3Q8yofMc8DJzHUyKdFXlHk563nR1Kgc=
|
||||
k8s.io/csi-translation-lib v0.25.4/go.mod h1:T8l3i4NTGQ0xgXiBHEr4euaO1kw59sNrH57Fa6SXBCY=
|
||||
k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms=
|
||||
k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
|
|
@ -1355,16 +1368,16 @@ k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
|
|||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea h1:3QOH5+2fGsY8e1qf+GIFpg+zw/JGNrgyZRQR7/m6uWg=
|
||||
k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
|
||||
k8s.io/kubectl v0.25.3 h1:HnWJziEtmsm4JaJiKT33kG0kadx68MXxUE8UEbXnN4U=
|
||||
k8s.io/kubectl v0.25.3/go.mod h1:glU7PiVj/R6Ud4A9FJdTcJjyzOtCJyc0eO7Mrbh3jlI=
|
||||
k8s.io/kubelet v0.25.3 h1:PjT3Xo0VL1BpRilBpZrRN8pSy6w5pGQ0YDQQeQWSHvQ=
|
||||
k8s.io/kubelet v0.25.3/go.mod h1:YopVc6vLhveZb22I7AzcoWPap+t3/KJKqRZDa2MZmyE=
|
||||
k8s.io/legacy-cloud-providers v0.25.3 h1:v7M/U8jE6kJn2gEBVofDApCOkgUlsFGBTXosTKVTtAo=
|
||||
k8s.io/legacy-cloud-providers v0.25.3/go.mod h1:0l3ulE+R3UXrVSfevmLvKSqJluRX/ABedGLGfpYf9t0=
|
||||
k8s.io/mount-utils v0.25.3 h1:Eb4MDClmozX3Vrz4ZtoG0bQ/pGhT5gyo28p3f+0r9EE=
|
||||
k8s.io/mount-utils v0.25.3/go.mod h1:odpFnGwJfFjN3SRnjfGS0902ubcj/W6hDOrNDmSSINo=
|
||||
k8s.io/utils v0.0.0-20221101230645-61b03e2f6476 h1:L14f2LWkOxG2rYsuSA3ltQnnST1vMfek/GUk+VemxD4=
|
||||
k8s.io/utils v0.0.0-20221101230645-61b03e2f6476/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/kubectl v0.25.4 h1:O3OA1z4V1ZyvxCvScjq0pxAP7ABgznr8UvnVObgI6Dc=
|
||||
k8s.io/kubectl v0.25.4/go.mod h1:CKMrQ67Bn2YCP26tZStPQGq62zr9pvzEf65A0navm8k=
|
||||
k8s.io/kubelet v0.25.4 h1:24MmTTQGBHr08UkMYFC/RaLjuiMREM53HfRgJKWRquI=
|
||||
k8s.io/kubelet v0.25.4/go.mod h1:dWAxzvWR7B6LrSgE+6H6Dc7bOzNOzm+O+W6zLic9daA=
|
||||
k8s.io/legacy-cloud-providers v0.25.4 h1:mbYPwS81rinU1DKtbc42nXXxQ7Lu7yYz6UirTPz23Mw=
|
||||
k8s.io/legacy-cloud-providers v0.25.4/go.mod h1:20tayVpZTfvZnxH2vz21SLQvHh7IhKcAE75hcnuIaDU=
|
||||
k8s.io/mount-utils v0.25.4 h1:+j1GBo6rH6sM1GvOI4jcu1IpjA5WssuwW7UEkQevaTU=
|
||||
k8s.io/mount-utils v0.25.4/go.mod h1:odpFnGwJfFjN3SRnjfGS0902ubcj/W6hDOrNDmSSINo=
|
||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
|
||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
oras.land/oras-go v1.2.0 h1:yoKosVIbsPoFMqAIFHTnrmOuafHal+J/r+I5bdbVWu4=
|
||||
oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
|
|
|
|||
|
|
tests/e2e/go.mod

@@ -12,8 +12,8 @@ require (
  github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
  github.com/octago/sflags v0.2.0
  github.com/spf13/pflag v1.0.5
- k8s.io/api v0.25.3
- k8s.io/apimachinery v0.25.3
+ k8s.io/api v0.25.4
+ k8s.io/apimachinery v0.25.4
  k8s.io/client-go v11.0.1-0.20190805182717-6502b5e7b1b5+incompatible
  k8s.io/klog/v2 v2.80.1
  k8s.io/kops v1.24.1
@@ -23,11 +23,11 @@ require (
 )

 require (
- cloud.google.com/go v0.104.0 // indirect
+ cloud.google.com/go v0.105.0 // indirect
  cloud.google.com/go/compute v1.12.1 // indirect
  cloud.google.com/go/compute/metadata v0.2.1 // indirect
- cloud.google.com/go/iam v0.3.0 // indirect
- cloud.google.com/go/storage v1.25.0 // indirect
+ cloud.google.com/go/iam v0.6.0 // indirect
+ cloud.google.com/go/storage v1.27.0 // indirect
  github.com/Azure/azure-pipeline-go v0.2.3 // indirect
  github.com/Azure/azure-storage-blob-go v0.15.0 // indirect
  github.com/Microsoft/go-winio v0.6.0 // indirect
@@ -37,9 +37,9 @@ require (
  github.com/StackExchange/wmi v1.2.1 // indirect
  github.com/acomagu/bufpipe v1.0.3 // indirect
  github.com/apparentlymart/go-cidr v1.1.0 // indirect
- github.com/armon/go-metrics v0.3.10 // indirect
+ github.com/armon/go-metrics v0.4.0 // indirect
  github.com/armon/go-radix v1.0.0 // indirect
- github.com/aws/aws-sdk-go v1.44.130 // indirect
+ github.com/aws/aws-sdk-go v1.44.135 // indirect
  github.com/beorn7/perks v1.0.1 // indirect
  github.com/blang/semver v3.5.1+incompatible // indirect
  github.com/cenkalti/backoff/v3 v3.0.0 // indirect
@@ -72,13 +72,13 @@ require (
  github.com/golang/snappy v0.0.4 // indirect
  github.com/google/gnostic v0.6.9 // indirect
  github.com/google/go-cmp v0.5.9 // indirect
- github.com/google/go-containerregistry v0.12.0 // indirect
+ github.com/google/go-containerregistry v0.12.1 // indirect
  github.com/google/go-github/v33 v33.0.0 // indirect
  github.com/google/go-querystring v1.1.0 // indirect
  github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea // indirect
  github.com/google/uuid v1.3.0 // indirect
  github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
- github.com/googleapis/gax-go/v2 v2.6.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.7.0 // indirect
  github.com/gophercloud/gophercloud v1.0.0 // indirect
  github.com/hashicorp/errwrap v1.1.0 // indirect
  github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -132,8 +132,8 @@ require (
  github.com/pierrec/lz4 v2.5.2+incompatible // indirect
  github.com/pkg/errors v0.9.1 // indirect
  github.com/pkg/sftp v1.13.5 // indirect
- github.com/prometheus/client_golang v1.13.1 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/client_golang v1.14.0 // indirect
+ github.com/prometheus/client_model v0.3.0 // indirect
  github.com/prometheus/common v0.37.0 // indirect
  github.com/prometheus/procfs v0.8.0 // indirect
  github.com/ryanuber/go-glob v1.0.0 // indirect
@@ -144,23 +144,23 @@ require (
  github.com/ulikunitz/xz v0.5.10 // indirect
  github.com/vbatts/tar-split v0.11.2 // indirect
  github.com/xanzy/ssh-agent v0.3.1 // indirect
- go.opencensus.io v0.23.0 // indirect
+ go.opencensus.io v0.24.0 // indirect
  go.uber.org/atomic v1.9.0 // indirect
  go4.org v0.0.0-20201209231011-d4a079459e60 // indirect
- golang.org/x/crypto v0.1.0 // indirect
+ golang.org/x/crypto v0.2.0 // indirect
  golang.org/x/mod v0.6.0 // indirect
- golang.org/x/net v0.1.0 // indirect
- golang.org/x/oauth2 v0.1.0 // indirect
+ golang.org/x/net v0.2.0 // indirect
+ golang.org/x/oauth2 v0.2.0 // indirect
  golang.org/x/sync v0.1.0 // indirect
- golang.org/x/sys v0.1.0 // indirect
- golang.org/x/term v0.1.0 // indirect
+ golang.org/x/sys v0.2.0 // indirect
+ golang.org/x/term v0.2.0 // indirect
  golang.org/x/text v0.4.0 // indirect
  golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
  golang.org/x/tools v0.1.12 // indirect
  golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
- google.golang.org/api v0.102.0 // indirect
+ google.golang.org/api v0.103.0 // indirect
  google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e // indirect
+ google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect
  google.golang.org/grpc v1.50.1 // indirect
  google.golang.org/protobuf v1.28.1 // indirect
  gopkg.in/inf.v0 v0.9.1 // indirect
@@ -171,7 +171,7 @@ require (
  k8s.io/kube-openapi v0.0.0-20220803164354-a70c9af30aea // indirect
  k8s.io/release v0.7.1-0.20210204090829-09fb5e3883b8 // indirect
  k8s.io/test-infra v0.0.0-20210730160938-8ad9b8c53bd8 // indirect
- k8s.io/utils v0.0.0-20221101230645-61b03e2f6476 // indirect
+ k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 // indirect
  sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
  sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 )
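The checksum updates that follow mirror these version bumps. As a hedged aside, a small Go sketch like the one below can enumerate the module@version pairs recorded in a go.sum file, which makes it easier to confirm that every bumped requirement has matching sum entries; the "go.sum" file name and the output format are illustrative assumptions only.

// Print each unique module@version pair recorded in a go.sum file.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("go.sum") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	seen := map[string]bool{}
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		// go.sum lines look like: module version[/go.mod] h1:hash=
		fields := strings.Fields(sc.Text())
		if len(fields) != 3 {
			continue // skip blank or malformed lines
		}
		mod, ver := fields[0], strings.TrimSuffix(fields[1], "/go.mod")
		key := mod + " " + ver
		if !seen[key] {
			seen[key] = true
			fmt.Println(key)
		}
	}
	if err := sc.Err(); err != nil {
		panic(err)
	}
}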

tests/e2e/go.sum (157 changed lines)

@ -33,25 +33,14 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT
|
|||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
|
||||
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
|
||||
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
|
||||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8=
|
||||
cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
|
||||
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
|
||||
cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
|
||||
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
|
||||
cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
|
||||
cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
|
||||
cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
|
||||
|
|
@ -59,10 +48,11 @@ cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxB
|
|||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
|
||||
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
|
||||
cloud.google.com/go/iam v0.6.0 h1:nsqQC88kT5Iwlm4MeNGTpfMWddp6NB/UOLFTH6m1QfQ=
|
||||
cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
|
||||
cloud.google.com/go/logging v1.0.0/go.mod h1:V1cc3ogwobYzQq5f2R7DS/GvRIrI4FKj01Gs5glwAls=
|
||||
cloud.google.com/go/logging v1.1.2/go.mod h1:KrljuAHIw631j9+QXsnq9vDwsrwmdxfGpivMR68M7DY=
|
||||
cloud.google.com/go/longrunning v0.1.1 h1:y50CXG4j0+qvEukslYFBCrzaXX0qpFbBzc3PchSu/LE=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
|
|
@ -76,8 +66,8 @@ cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GM
|
|||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.10.1-0.20200805182106-fcd132957b02/go.mod h1:bdhVveip9CJX75wUu7ALOTnCSKjv6PHRY0bCeBmePnw=
|
||||
cloud.google.com/go/storage v1.12.0/go.mod h1:fFLk2dp2oAhDz8QFKwqrjdJvxSp/W2g7nillojlL5Ho=
|
||||
cloud.google.com/go/storage v1.25.0 h1:D2Dn0PslpK7Z3B2AvuUHyIC762bDbGJdlmQlCBR71os=
|
||||
cloud.google.com/go/storage v1.25.0/go.mod h1:Qys4JU+jeup3QnuKKAosWuxrD95C4MSqxfVDnSirDsI=
|
||||
cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ=
|
||||
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
|
||||
code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY=
|
||||
contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
|
||||
|
|
@ -284,8 +274,8 @@ github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/Y
|
|||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
|
||||
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
|
||||
github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
|
|
@ -322,8 +312,8 @@ github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU
|
|||
github.com/aws/aws-sdk-go v1.31.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/aws/aws-sdk-go v1.37.22/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.44.130 h1:a/qwOxmYJF47xTZvTjECSJXnfRbjegb3YxvCXfETtnY=
|
||||
github.com/aws/aws-sdk-go v1.44.130/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/aws/aws-sdk-go v1.44.135 h1:DJJP/CkEpgafA5p5jlY9VzDRyKrfABVixzIxrK/3tWU=
|
||||
github.com/aws/aws-sdk-go v1.44.135/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
|
||||
github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU=
|
||||
github.com/bazelbuild/buildtools v0.0.0-20200922170545-10384511ce98/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU=
|
||||
|
|
@ -984,7 +974,6 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw
|
|||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
|
||||
|
|
@ -1043,8 +1032,8 @@ github.com/google/go-containerregistry v0.0.0-20200331213917-3d03ed9b1ca2/go.mod
|
|||
github.com/google/go-containerregistry v0.1.1/go.mod h1:npTSyywOeILcgWqd+rvtzGWflIPPcBQhYoOONaY4ltM=
|
||||
github.com/google/go-containerregistry v0.3.0/go.mod h1:BJ7VxR1hAhdiZBGGnvGETHEmFs1hzXc4VM1xjOPO9wA=
|
||||
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
|
||||
github.com/google/go-containerregistry v0.12.0 h1:nidOEtFYlgPCRqxCKj/4c/js940HVWplCWc5ftdfdUA=
|
||||
github.com/google/go-containerregistry v0.12.0/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
|
||||
github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8=
|
||||
github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
|
||||
github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
|
||||
|
|
@ -1077,7 +1066,6 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.m
|
|||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
|
||||
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
|
|
@ -1094,9 +1082,6 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
|
|
@ -1118,10 +1103,8 @@ github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk
|
|||
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
|
||||
github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU=
|
||||
github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
|
|
@ -1707,16 +1690,17 @@ github.com/prometheus/client_golang v1.5.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
|
|||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c=
|
||||
github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
|
|
@ -1894,8 +1878,10 @@ github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8t
|
|||
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
|
|
@ -1905,7 +1891,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
|||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo=
|
||||
github.com/sylabs/sif/v2 v2.4.2/go.mod h1:6gQvzNKRIqr4FS08XBfHpkpnxv9b7h58GLkSJ1zdK9A=
|
||||
|
|
@ -2019,6 +2007,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||
|
|
@ -2061,8 +2050,9 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
|
||||
|
|
@ -2163,8 +2153,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
|||
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE=
|
||||
golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
|
@ -2208,6 +2198,7 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM
|
|||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
|
@ -2280,7 +2271,6 @@ golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5o
|
|||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
|
|
@ -2292,8 +2282,10 @@ golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
|
@ -2312,13 +2304,11 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
|
|||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y=
|
||||
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
|
||||
golang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU=
|
||||
golang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
|
@ -2331,6 +2321,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
@ -2450,42 +2441,38 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211001092434-39dca1131b70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -2626,8 +2613,6 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f
|
|||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
|
|
@ -2681,20 +2666,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR
|
|||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
|
||||
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
|
||||
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
|
||||
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
|
||||
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
|
||||
google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I=
|
||||
google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
|
||||
google.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ=
|
||||
google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
|
@ -2778,34 +2751,13 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y=
|
||||
google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo=
|
||||
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
|
|
@ -2839,18 +2791,13 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
|||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
|
||||
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
|
@ -2965,8 +2912,8 @@ k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
|
|||
k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s=
|
||||
k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
|
||||
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
|
||||
k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
|
||||
k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
|
||||
k8s.io/api v0.25.4 h1:3YO8J4RtmG7elEgaWMb4HgmpS2CfY1QlaOz9nwB+ZSs=
|
||||
k8s.io/api v0.25.4/go.mod h1:IG2+RzyPQLllQxnhzD8KQNEu4c4YvyDTpSMztf4A0OQ=
|
||||
k8s.io/apiextensions-apiserver v0.0.0-20190918201827-3de75813f604/go.mod h1:7H8sjDlWQu89yWB3FhZfsLyRCRLuoXoCoY5qtwW1q6I=
|
||||
k8s.io/apiextensions-apiserver v0.16.4/go.mod h1:HYQwjujEkXmQNhap2C9YDdIVOSskGZ3et0Mvjcyjbto=
|
||||
k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs=
|
||||
|
|
@ -2995,8 +2942,8 @@ k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswP
|
|||
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
|
||||
k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
|
||||
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
|
||||
k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
|
||||
k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
|
||||
k8s.io/apimachinery v0.25.4 h1:CtXsuaitMESSu339tfhVXhQrPET+EiWnIY1rcurKnAc=
|
||||
k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
|
||||
k8s.io/apiserver v0.0.0-20190918200908-1e17798da8c1/go.mod h1:4FuDU+iKPjdsdQSN3GsEKZLB/feQsj1y9dhhBDVV2Ns=
|
||||
k8s.io/apiserver v0.16.4/go.mod h1:kbLJOak655g6W7C+muqu1F76u9wnEycfKMqbVaXIdAc=
|
||||
k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
|
||||
|
|
@ -3126,8 +3073,8 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
|
|||
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20221101230645-61b03e2f6476 h1:L14f2LWkOxG2rYsuSA3ltQnnST1vMfek/GUk+VemxD4=
|
||||
k8s.io/utils v0.0.0-20221101230645-61b03e2f6476/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
|
||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
knative.dev/caching v0.0.0-20190719140829-2032732871ff/go.mod h1:dHXFU6CGlLlbzaWc32g80cR92iuBSpsslDNBWI8C7eg=
|
||||
knative.dev/caching v0.0.0-20200116200605-67bca2c83dfa/go.mod h1:dHXFU6CGlLlbzaWc32g80cR92iuBSpsslDNBWI8C7eg=
|
||||
knative.dev/eventing-contrib v0.6.1-0.20190723221543-5ce18048c08b/go.mod h1:SnXZgSGgMSMLNFTwTnpaOH7hXDzTFtw0J8OmHflNx3g=
@@ -5,7 +5,7 @@ import (
	"strings"
	"time"

	"github.com/hashicorp/go-immutable-radix"
	iradix "github.com/hashicorp/go-immutable-radix"
)

type Label struct {
@@ -172,6 +172,12 @@ func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabe
	}
}

func (m *Metrics) Shutdown() {
	if ss, ok := m.sink.(ShutdownSink); ok {
		ss.Shutdown()
	}
}

// labelIsAllowed return true if a should be included in metric
// the caller should lock m.filterLock while calling this method
func (m *Metrics) labelIsAllowed(label *Label) bool {
@@ -24,6 +24,15 @@ type MetricSink interface {
	AddSampleWithLabels(key []string, val float32, labels []Label)
}

type ShutdownSink interface {
	MetricSink

	// Shutdown the metric sink, flush metrics to storage, and cleanup resources.
	// Called immediately prior to application exit. Implementations must block
	// until metrics are flushed to storage.
	Shutdown()
}

// BlackholeSink is used to just blackhole messages
type BlackholeSink struct{}

@@ -74,6 +83,14 @@ func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Lab
	}
}

func (fh FanoutSink) Shutdown() {
	for _, s := range fh {
		if ss, ok := s.(ShutdownSink); ok {
			ss.Shutdown()
		}
	}
}

// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided
// by each sink type
type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
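This hunk is from the vendored metrics library (it appears to be armon/go-metrics): MetricSink gains an optional ShutdownSink extension, and FanoutSink forwards Shutdown to every member sink that supports it. A minimal sketch of how a custom sink could opt in, assuming the go-metrics import path; bufferedSink and its drain hook are hypothetical names, not part of the library:

package sinkexample

import (
	metrics "github.com/armon/go-metrics"
)

// bufferedSink is a hypothetical sink that queues metrics and writes them out
// asynchronously, so it must drain its queue before the process exits.
type bufferedSink struct {
	metrics.MetricSink        // embed any existing sink to satisfy MetricSink
	drain              func() // hypothetical hook that blocks until the queue is persisted
}

// Shutdown satisfies metrics.ShutdownSink: block until buffered metrics reach storage.
func (s *bufferedSink) Shutdown() {
	if s.drain != nil {
		s.drain()
	}
}

// Compile-time check that the interface is actually implemented.
var _ metrics.ShutdownSink = (*bufferedSink)(nil)

Because the type embeds an existing MetricSink, it keeps the usual counter, gauge, and sample methods and only adds the flush-on-exit behaviour; a FanoutSink containing it would flush it automatically.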
@@ -144,3 +144,15 @@ func UpdateFilter(allow, block []string) {
func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
	globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
}

// Shutdown disables metric collection, then blocks while attempting to flush metrics to storage.
// WARNING: Not all MetricSink backends support this functionality, and calling this will cause them to leak resources.
// This is intended for use immediately prior to application exit.
func Shutdown() {
	m := globalMetrics.Load().(*Metrics)
	// Swap whatever MetricSink is currently active with a BlackholeSink. Callers must not have a
	// reason to expect that calls to the library will successfully collect metrics after Shutdown
	// has been called.
	globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
	m.Shutdown()
}
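The new package-level Shutdown swaps the active sink for a BlackholeSink before flushing the old one, so metrics recorded afterwards are silently dropped; it is meant to be called exactly once, immediately before exit. A hypothetical usage sketch, again assuming the armon/go-metrics import path (the service name and sink choice are placeholders, and nothing is flushed unless the configured sink implements ShutdownSink):

package main

import (
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Aggregate metrics in memory over 10s intervals, retaining one minute of data.
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("example-service"), sink); err != nil {
		panic(err)
	}
	// Flush and stop collection when main returns; later metric calls hit a BlackholeSink.
	defer metrics.Shutdown()

	metrics.IncrCounter([]string{"startup"}, 1)
	// ... run the application ...
}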
File diff suppressed because it is too large
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.44.130"
const SDKVersion = "1.44.135"
@ -859,6 +859,13 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig
|
|||
// For more information, see Launch configurations (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Amazon EC2 Auto Scaling configures instances launched as part of an Auto
|
||||
// Scaling group using either a launch template or a launch configuration. We
|
||||
// strongly recommend that you do not use launch configurations. They do not
|
||||
// provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. For
|
||||
// information about using launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-templates.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
// the error.
|
||||
|
|
@ -1051,10 +1058,9 @@ func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGrou
|
|||
// Deletes the specified Auto Scaling group.
|
||||
//
|
||||
// If the group has instances or scaling activities in progress, you must specify
|
||||
// the option to force the deletion in order for it to succeed.
|
||||
//
|
||||
// If the group has policies, deleting the group deletes the policies, the underlying
|
||||
// alarm actions, and any alarm that no longer has an associated action.
|
||||
// the option to force the deletion in order for it to succeed. The force delete
|
||||
// operation will also terminate the EC2 instances. If the group has a warm
|
||||
// pool, the force delete option also deletes the warm pool.
|
||||
//
|
||||
// To remove instances from the Auto Scaling group before deleting it, call
|
||||
// the DetachInstances API with the list of instances and the option to decrement
|
||||
|
|
@ -1065,6 +1071,13 @@ func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGrou
|
|||
// UpdateAutoScalingGroup API and set the minimum size and desired capacity
|
||||
// of the Auto Scaling group to zero.
|
||||
//
|
||||
// If the group has scaling policies, deleting the group deletes the policies,
|
||||
// the underlying alarm actions, and any alarm that no longer has an associated
|
||||
// action.
|
||||
//
|
||||
// For more information, see Delete your Auto Scaling infrastructure (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-process-shutdown.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
// the error.
|
||||
|
|
@ -7327,6 +7340,11 @@ type CreateAutoScalingGroupInput struct {
|
|||
// The name of the Auto Scaling group. This name must be unique per Region per
|
||||
// account.
|
||||
//
|
||||
// The name can contain any ASCII character 33 to 126 including most punctuation
|
||||
// characters, digits, and upper and lowercased letters.
|
||||
//
|
||||
// You cannot use a colon (:) in the name.
|
||||
//
|
||||
// AutoScalingGroupName is a required field
|
||||
AutoScalingGroupName *string `min:"1" type:"string" required:"true"`
|
||||
|
||||
|
|
@ -7405,7 +7423,7 @@ type CreateAutoScalingGroupInput struct {
|
|||
// and marking it unhealthy due to a failed Elastic Load Balancing or custom
|
||||
// health check. This is useful if your instances do not immediately pass these
|
||||
// health checks after they enter the InService state. For more information,
|
||||
// see Health check grace period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period)
|
||||
// see Set the health check grace period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// Default: 0 seconds
|
||||
|
|
@ -7480,10 +7498,8 @@ type CreateAutoScalingGroupInput struct {
|
|||
// MinSize is a required field
|
||||
MinSize *int64 `type:"integer" required:"true"`
|
||||
|
||||
// An embedded object that specifies a mixed instances policy.
|
||||
//
|
||||
// For more information, see Auto Scaling groups with multiple instance types
|
||||
// and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html)
|
||||
// The mixed instances policy. For more information, see Auto Scaling groups
|
||||
// with multiple instance types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
MixedInstancesPolicy *MixedInstancesPolicy `type:"structure"`
|
||||
|
||||
|
|
@ -7842,21 +7858,10 @@ type CreateLaunchConfigurationInput struct {
|
|||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
BlockDeviceMappings []*BlockDeviceMapping `type:"list"`
|
||||
|
||||
// EC2-Classic retires on August 15, 2022. This property is not supported after
|
||||
// that date.
|
||||
//
|
||||
// The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.
|
||||
// For more information, see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
// Available for backward compatibility.
|
||||
ClassicLinkVPCId *string `min:"1" type:"string"`
|
||||
|
||||
// EC2-Classic retires on August 15, 2022. This property is not supported after
|
||||
// that date.
|
||||
//
|
||||
// The IDs of one or more security groups for the specified ClassicLink-enabled
|
||||
// VPC.
|
||||
//
|
||||
// If you specify the ClassicLinkVPCId property, you must specify ClassicLinkVPCSecurityGroups.
|
||||
// Available for backward compatibility.
|
||||
ClassicLinkVPCSecurityGroups []*string `type:"list"`
|
||||
|
||||
// Specifies whether the launch configuration is optimized for EBS I/O (true)
|
||||
|
|
@ -10954,11 +10959,13 @@ type DesiredConfiguration struct {
|
|||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
LaunchTemplate *LaunchTemplateSpecification `type:"structure"`
|
||||
|
||||
// Describes a mixed instances policy. A mixed instances policy contains the
|
||||
// instance types that Amazon EC2 Auto Scaling can launch and other information
|
||||
// that Amazon EC2 Auto Scaling can use to launch instances and help optimize
|
||||
// your costs. For more information, see Auto Scaling groups with multiple instance
|
||||
// types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html)
|
||||
// Use this structure to launch multiple instance types and On-Demand Instances
|
||||
// and Spot Instances within a single Auto Scaling group.
|
||||
//
|
||||
// A mixed instances policy contains information that Amazon EC2 Auto Scaling
|
||||
// can use to launch instances and help optimize your costs. For more information,
|
||||
// see Auto Scaling groups with multiple instance types and purchase options
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
MixedInstancesPolicy *MixedInstancesPolicy `type:"structure"`
|
||||
}
|
||||
|
|
@ -13431,15 +13438,34 @@ func (s *InstanceRefreshWarmPoolProgress) SetPercentageComplete(v int64) *Instan
|
|||
return s
|
||||
}
|
||||
|
||||
// When you specify multiple parameters, you get instance types that satisfy
|
||||
// all of the specified parameters. If you specify multiple values for a parameter,
|
||||
// The attributes for the instance types for a mixed instances policy. Amazon
|
||||
// EC2 Auto Scaling uses your specified requirements to identify instance types.
|
||||
// Then, it uses your On-Demand and Spot allocation strategies to launch instances
|
||||
// from these instance types.
|
||||
//
|
||||
// When you specify multiple attributes, you get instance types that satisfy
|
||||
// all of the specified attributes. If you specify multiple values for an attribute,
|
||||
// you get instance types that satisfy any of the specified values.
|
||||
//
|
||||
// Represents requirements for the types of instances that can be launched.
|
||||
// You must specify VCpuCount and MemoryMiB, but all other parameters are optional.
|
||||
// To limit the list of instance types from which Amazon EC2 Auto Scaling can
|
||||
// identify matching instance types, you can use one of the following parameters,
|
||||
// but not both in the same request:
|
||||
//
|
||||
// - AllowedInstanceTypes - The instance types to include in the list. All
|
||||
// other instance types are ignored, even if they match your specified attributes.
|
||||
//
|
||||
// - ExcludedInstanceTypes - The instance types to exclude from the list,
|
||||
// even if they match your specified attributes.
|
||||
//
|
||||
// You must specify VCpuCount and MemoryMiB. All other attributes are optional.
|
||||
// Any unspecified optional attribute is set to its default.
|
||||
//
|
||||
// For more information, see Creating an Auto Scaling group using attribute-based
|
||||
// instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
// in the Amazon EC2 Auto Scaling User Guide. For help determining which instance
|
||||
// types match your attributes before you apply them to your Auto Scaling group,
|
||||
// see Preview instance types with specified attributes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html#ec2fleet-get-instance-types-from-instance-requirements)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
type InstanceRequirements struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
|
@ -13448,7 +13474,7 @@ type InstanceRequirements struct {
|
|||
//
|
||||
// To exclude accelerator-enabled instance types, set Max to 0.
|
||||
//
|
||||
// Default: No minimum or maximum
|
||||
// Default: No minimum or maximum limits
|
||||
AcceleratorCount *AcceleratorCountRequest `type:"structure"`
|
||||
|
||||
// Indicates whether instance types must have accelerators by specific manufacturers.
|
||||
|
|
@ -13486,7 +13512,7 @@ type InstanceRequirements struct {
|
|||
// The minimum and maximum total memory size for the accelerators on an instance
|
||||
// type, in MiB.
|
||||
//
|
||||
// Default: No minimum or maximum
|
||||
// Default: No minimum or maximum limits
|
||||
AcceleratorTotalMemoryMiB *AcceleratorTotalMemoryMiBRequest `type:"structure"`
|
||||
|
||||
// Lists the accelerator types that must be on an instance type.
|
||||
|
|
@ -13500,6 +13526,23 @@ type InstanceRequirements struct {
|
|||
// Default: Any accelerator type
|
||||
AcceleratorTypes []*string `type:"list" enum:"AcceleratorType"`
|
||||
|
||||
// The instance types to apply your specified attributes against. All other
|
||||
// instance types are ignored, even if they match your specified attributes.
|
||||
//
|
||||
// You can use strings with one or more wild cards, represented by an asterisk
|
||||
// (*), to allow an instance type, size, or generation. The following are examples:
|
||||
// m5.8xlarge, c5*.*, m5a.*, r*, *3*.
|
||||
//
|
||||
// For example, if you specify c5*, Amazon EC2 Auto Scaling will allow the entire
|
||||
// C5 instance family, which includes all C5a and C5n instance types. If you
|
||||
// specify m5a.*, Amazon EC2 Auto Scaling will allow all the M5a instance types,
|
||||
// but not the M5n instance types.
|
||||
//
|
||||
// If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes.
|
||||
//
|
||||
// Default: All instance types
|
||||
AllowedInstanceTypes []*string `type:"list"`
|
||||
|
||||
// Indicates whether bare metal instance types are included, excluded, or required.
|
||||
//
|
||||
// Default: excluded
|
||||
|
|
@ -13509,7 +13552,7 @@ type InstanceRequirements struct {
|
|||
// in Mbps. For more information, see Amazon EBS–optimized instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
//
|
||||
// Default: No minimum or maximum
|
||||
// Default: No minimum or maximum limits
|
||||
BaselineEbsBandwidthMbps *BaselineEbsBandwidthMbpsRequest `type:"structure"`
|
||||
|
||||
// Indicates whether burstable performance instance types are included, excluded,
|
||||
|
|
@ -13534,13 +13577,17 @@ type InstanceRequirements struct {
|
|||
// Default: Any manufacturer
|
||||
CpuManufacturers []*string `type:"list" enum:"CpuManufacturer"`
|
||||
|
||||
// Lists which instance types to exclude. You can use strings with one or more
|
||||
// wild cards, represented by an asterisk (*). The following are examples: c5*,
|
||||
// m5a.*, r*, *3*.
|
||||
// The instance types to exclude. You can use strings with one or more wild
|
||||
// cards, represented by an asterisk (*), to exclude an instance family, type,
|
||||
// size, or generation. The following are examples: m5.8xlarge, c5*.*, m5a.*,
|
||||
// r*, *3*.
|
||||
//
|
||||
// For example, if you specify c5*, you are excluding the entire C5 instance
|
||||
// family, which includes all C5a and C5n instance types. If you specify m5a.*,
|
||||
// you are excluding all the M5a instance types, but not the M5n instance types.
|
||||
// Amazon EC2 Auto Scaling will exclude all the M5a instance types, but not
|
||||
// the M5n instance types.
|
||||
//
|
||||
// If you specify ExcludedInstanceTypes, you can't specify AllowedInstanceTypes.
|
||||
//
|
||||
// Default: No excluded instance types
|
||||
ExcludedInstanceTypes []*string `type:"list"`
|
||||
|
|
@ -13578,7 +13625,7 @@ type InstanceRequirements struct {
|
|||
// The minimum and maximum amount of memory per vCPU for an instance type, in
|
||||
// GiB.
|
||||
//
|
||||
// Default: No minimum or maximum
|
||||
// Default: No minimum or maximum limits
|
||||
MemoryGiBPerVCpu *MemoryGiBPerVCpuRequest `type:"structure"`
|
||||
|
||||
// The minimum and maximum instance memory size for an instance type, in MiB.
|
||||
|
|
@ -13586,9 +13633,15 @@ type InstanceRequirements struct {
|
|||
// MemoryMiB is a required field
|
||||
MemoryMiB *MemoryMiBRequest `type:"structure" required:"true"`
|
||||
|
||||
// The minimum and maximum amount of network bandwidth, in gigabits per second
|
||||
// (Gbps).
|
||||
//
|
||||
// Default: No minimum or maximum limits
|
||||
NetworkBandwidthGbps *NetworkBandwidthGbpsRequest `type:"structure"`
|
||||
|
||||
// The minimum and maximum number of network interfaces for an instance type.
|
||||
//
|
||||
// Default: No minimum or maximum
|
||||
// Default: No minimum or maximum limits
|
||||
NetworkInterfaceCount *NetworkInterfaceCountRequest `type:"structure"`
|
||||
|
||||
// The price protection threshold for On-Demand Instances. This is the maximum
|
||||
|
|
@ -13632,7 +13685,7 @@ type InstanceRequirements struct {
|
|||
// The minimum and maximum total local storage size for an instance type, in
|
||||
// GB.
|
||||
//
|
||||
// Default: No minimum or maximum
|
||||
// Default: No minimum or maximum limits
|
||||
TotalLocalStorageGB *TotalLocalStorageGBRequest `type:"structure"`
|
||||
|
||||
// The minimum and maximum number of vCPUs for an instance type.
|
||||
|
|
@ -13715,6 +13768,12 @@ func (s *InstanceRequirements) SetAcceleratorTypes(v []*string) *InstanceRequire
|
|||
return s
|
||||
}
|
||||
|
||||
// SetAllowedInstanceTypes sets the AllowedInstanceTypes field's value.
|
||||
func (s *InstanceRequirements) SetAllowedInstanceTypes(v []*string) *InstanceRequirements {
|
||||
s.AllowedInstanceTypes = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetBareMetal sets the BareMetal field's value.
|
||||
func (s *InstanceRequirements) SetBareMetal(v string) *InstanceRequirements {
|
||||
s.BareMetal = &v
|
||||
|
|
@ -13775,6 +13834,12 @@ func (s *InstanceRequirements) SetMemoryMiB(v *MemoryMiBRequest) *InstanceRequir
|
|||
return s
|
||||
}
|
||||
|
||||
// SetNetworkBandwidthGbps sets the NetworkBandwidthGbps field's value.
|
||||
func (s *InstanceRequirements) SetNetworkBandwidthGbps(v *NetworkBandwidthGbpsRequest) *InstanceRequirements {
|
||||
s.NetworkBandwidthGbps = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetNetworkInterfaceCount sets the NetworkInterfaceCount field's value.
|
||||
func (s *InstanceRequirements) SetNetworkInterfaceCount(v *NetworkInterfaceCountRequest) *InstanceRequirements {
|
||||
s.NetworkInterfaceCount = v
|
||||
|
|
@ -13847,36 +13912,45 @@ func (s *InstanceReusePolicy) SetReuseOnScaleIn(v bool) *InstanceReusePolicy {
|
|||
return s
|
||||
}
|
||||
|
||||
// Describes an instances distribution for an Auto Scaling group.
|
||||
// Use this structure to specify the distribution of On-Demand Instances and
|
||||
// Spot Instances and the allocation strategies used to fulfill On-Demand and
|
||||
// Spot capacities for a mixed instances policy.
|
||||
type InstancesDistribution struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The order of the launch template overrides to use in fulfilling On-Demand
|
||||
// capacity.
|
||||
// The allocation strategy to apply to your On-Demand Instances when they are
|
||||
// launched. Possible instance types are determined by the launch template overrides
|
||||
// that you specify.
|
||||
//
|
||||
// If you specify lowest-price, Amazon EC2 Auto Scaling uses price to determine
|
||||
// the order, launching the lowest price first.
|
||||
// The following lists the valid values:
|
||||
//
|
||||
// If you specify prioritized, Amazon EC2 Auto Scaling uses the priority that
|
||||
// you assigned to each launch template override, launching the highest priority
|
||||
// first. If all your On-Demand capacity cannot be fulfilled using your highest
|
||||
// priority instance, then Amazon EC2 Auto Scaling launches the remaining capacity
|
||||
// using the second priority instance type, and so on.
|
||||
// lowest-price
|
||||
//
|
||||
// Default: lowest-price for Auto Scaling groups that specify InstanceRequirements
|
||||
// in the overrides and prioritized for Auto Scaling groups that don't.
|
||||
// Uses price to determine which instance types are the highest priority, launching
|
||||
// the lowest priced instance types within an Availability Zone first. This
|
||||
// is the default value for Auto Scaling groups that specify InstanceRequirements.
|
||||
//
|
||||
// Valid values: lowest-price | prioritized
|
||||
// prioritized
|
||||
//
|
||||
// You set the order of instance types for the launch template overrides from
|
||||
// highest to lowest priority (from first to last in the list). Amazon EC2 Auto
|
||||
// Scaling launches your highest priority instance types first. If all your
|
||||
// On-Demand capacity cannot be fulfilled using your highest priority instance
|
||||
// type, then Amazon EC2 Auto Scaling launches the remaining capacity using
|
||||
// the second priority instance type, and so on. This is the default value for
|
||||
// Auto Scaling groups that don't specify InstanceRequirements and cannot be
|
||||
// used for groups that do.
|
||||
OnDemandAllocationStrategy *string `type:"string"`
|
||||
|
||||
// The minimum amount of the Auto Scaling group's capacity that must be fulfilled
|
||||
// by On-Demand Instances. This base portion is launched first as your group
|
||||
// scales.
|
||||
//
|
||||
// If you specify weights for the instance types in the overrides, the base
|
||||
// capacity is measured in the same unit of measurement as the instance types.
|
||||
// If you specify InstanceRequirements in the overrides, the base capacity is
|
||||
// measured in the same unit of measurement as your group's desired capacity.
|
||||
// This number has the same unit of measurement as the group's desired capacity.
|
||||
// If you change the default unit of measurement (number of instances) by specifying
|
||||
// weighted capacity values in your launch template overrides list, or by changing
|
||||
// the default desired capacity type setting of the group, you must specify
|
||||
// this number using the same unit of measurement.
|
||||
//
|
||||
// Default: 0
|
||||
OnDemandBaseCapacity *int64 `type:"integer"`
|
||||
|
|
@ -13889,41 +13963,63 @@ type InstancesDistribution struct {
|
|||
// Default: 100
|
||||
OnDemandPercentageAboveBaseCapacity *int64 `type:"integer"`
|
||||
|
||||
// Indicates how to allocate instances across Spot Instance pools.
|
||||
// The allocation strategy to apply to your Spot Instances when they are launched.
|
||||
// Possible instance types are determined by the launch template overrides that
|
||||
// you specify.
|
||||
//
|
||||
// If the allocation strategy is lowest-price, the Auto Scaling group launches
|
||||
// instances using the Spot pools with the lowest price, and evenly allocates
|
||||
// your instances across the number of Spot pools that you specify.
|
||||
// The following lists the valid values:
|
||||
//
|
||||
// If the allocation strategy is capacity-optimized (recommended), the Auto
|
||||
// Scaling group launches instances using Spot pools that are optimally chosen
|
||||
// based on the available Spot capacity. Alternatively, you can use capacity-optimized-prioritized
|
||||
// and set the order of instance types in the list of launch template overrides
|
||||
// from highest to lowest priority (from first to last in the list). Amazon
|
||||
// EC2 Auto Scaling honors the instance type priorities on a best-effort basis
|
||||
// but optimizes for capacity first.
|
||||
// capacity-optimized
|
||||
//
|
||||
// Default: lowest-price
|
||||
// Requests Spot Instances using pools that are optimally chosen based on the
|
||||
// available Spot capacity. This strategy has the lowest risk of interruption.
|
||||
// To give certain instance types a higher chance of launching first, use capacity-optimized-prioritized.
|
||||
//
|
||||
// Valid values: lowest-price | capacity-optimized | capacity-optimized-prioritized
|
||||
// capacity-optimized-prioritized
|
||||
//
|
||||
// You set the order of instance types for the launch template overrides from
|
||||
// highest to lowest priority (from first to last in the list). Amazon EC2 Auto
|
||||
// Scaling honors the instance type priorities on a best-effort basis but optimizes
|
||||
// for capacity first. Note that if the On-Demand allocation strategy is set
|
||||
// to prioritized, the same priority is applied when fulfilling On-Demand capacity.
|
||||
// This is not a valid value for Auto Scaling groups that specify InstanceRequirements.
|
||||
//
|
||||
// lowest-price
|
||||
//
|
||||
// Requests Spot Instances using the lowest priced pools within an Availability
|
||||
// Zone, across the number of Spot pools that you specify for the SpotInstancePools
|
||||
// property. To ensure that your desired capacity is met, you might receive
|
||||
// Spot Instances from several pools. This is the default value, but it might
|
||||
// lead to high interruption rates because this strategy only considers instance
|
||||
// price and not available capacity.
|
||||
//
|
||||
// price-capacity-optimized (recommended)
|
||||
//
|
||||
// Amazon EC2 Auto Scaling identifies the pools with the highest capacity availability
|
||||
// for the number of instances that are launching. This means that we will request
|
||||
// Spot Instances from the pools that we believe have the lowest chance of interruption
|
||||
// in the near term. Amazon EC2 Auto Scaling then requests Spot Instances from
|
||||
// the lowest priced of these pools.
|
||||
SpotAllocationStrategy *string `type:"string"`
|
||||
|
||||
// The number of Spot Instance pools across which to allocate your Spot Instances.
|
||||
// The Spot pools are determined from the different instance types in the overrides.
|
||||
// Valid only when the Spot allocation strategy is lowest-price. Value must
|
||||
// be in the range of 1–20.
|
||||
// Valid only when the SpotAllocationStrategy is lowest-price. Value must be
|
||||
// in the range of 1–20.
|
||||
//
|
||||
// Default: 2
|
||||
SpotInstancePools *int64 `type:"integer"`
|
||||
|
||||
// The maximum price per unit hour that you are willing to pay for a Spot Instance.
|
||||
// If you keep the value at its default (unspecified), Amazon EC2 Auto Scaling
|
||||
// uses the On-Demand price as the maximum Spot price. To remove a value that
|
||||
// you previously set, include the property but specify an empty string ("")
|
||||
// for the value.
|
||||
//
|
||||
// If your maximum price is lower than the Spot price for the instance types
|
||||
// that you selected, your Spot Instances are not launched.
|
||||
// that you selected, your Spot Instances are not launched. We do not recommend
|
||||
// specifying a maximum price because it can lead to increased interruptions.
|
||||
// When Spot Instances launch, you pay the current Spot price. To remove a maximum
|
||||
// price that you previously set, include the property but specify an empty
|
||||
// string ("") for the value.
|
||||
//
|
||||
// If you specify a maximum price, your instances will be interrupted more frequently
|
||||
// than if you do not specify one.
|
||||
//
|
||||
// Valid Range: Minimum value of 0.001
|
||||
SpotMaxPrice *string `type:"string"`
@ -14004,16 +14100,10 @@ type LaunchConfiguration struct {
|
|||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
BlockDeviceMappings []*BlockDeviceMapping `type:"list"`
|
||||
|
||||
// EC2-Classic retires on August 15, 2022. This property is not supported after
|
||||
// that date.
|
||||
//
|
||||
// The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances to.
|
||||
// Available for backward compatibility.
|
||||
ClassicLinkVPCId *string `min:"1" type:"string"`
|
||||
|
||||
// EC2-Classic retires on August 15, 2022. This property is not supported after
|
||||
// that date.
|
||||
//
|
||||
// The IDs of one or more security groups for the VPC specified in ClassicLinkVPCId.
|
||||
// Available for backward compatibility.
|
||||
ClassicLinkVPCSecurityGroups []*string `type:"list"`
|
||||
|
||||
// The creation date and time for the launch configuration.
@ -14250,21 +14340,16 @@ func (s *LaunchConfiguration) SetUserData(v string) *LaunchConfiguration {
|
|||
return s
|
||||
}
|
||||
|
||||
// Describes a launch template and overrides. You specify these properties as
|
||||
// part of a mixed instances policy.
|
||||
// Use this structure to specify the launch templates and instance types (overrides)
|
||||
// for a mixed instances policy.
|
||||
type LaunchTemplate struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The launch template to use.
|
||||
// The launch template.
|
||||
LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"`
|
||||
|
||||
// Any properties that you specify override the same properties in the launch
|
||||
// template. If not provided, Amazon EC2 Auto Scaling uses the instance type
|
||||
// or instance type requirements specified in the launch template when it launches
|
||||
// an instance.
|
||||
//
|
||||
// The overrides can include either one or more instance types or a set of instance
|
||||
// requirements, but not both.
|
||||
// template.
|
||||
Overrides []*LaunchTemplateOverrides `type:"list"`
|
||||
}
|
||||
|
||||
|
|
@ -14323,45 +14408,82 @@ func (s *LaunchTemplate) SetOverrides(v []*LaunchTemplateOverrides) *LaunchTempl
|
|||
return s
|
||||
}
|
||||
|
||||
// Describes an override for a launch template. For more information, see Configuring
|
||||
// overrides (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-configuring-overrides.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
// Use this structure to let Amazon EC2 Auto Scaling do the following when the
|
||||
// Auto Scaling group has a mixed instances policy:
|
||||
//
|
||||
// - Override the instance type that is specified in the launch template.
|
||||
//
|
||||
// - Use multiple instance types.
|
||||
//
|
||||
// Specify the instance types that you want, or define your instance requirements
|
||||
// instead and let Amazon EC2 Auto Scaling provision the available instance
|
||||
// types that meet your requirements. This can provide Amazon EC2 Auto Scaling
|
||||
// with a larger selection of instance types to choose from when fulfilling
|
||||
// Spot and On-Demand capacities. You can view which instance types are matched
|
||||
// before you apply the instance requirements to your Auto Scaling group.
|
||||
//
|
||||
// After you define your instance requirements, you don't have to keep updating
|
||||
// these settings to get new EC2 instance types automatically. Amazon EC2 Auto
|
||||
// Scaling uses the instance requirements of the Auto Scaling group to determine
|
||||
// whether a new EC2 instance type can be used.
|
||||
type LaunchTemplateOverrides struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The instance requirements. When you specify instance requirements, Amazon
|
||||
// EC2 Auto Scaling finds instance types that satisfy your requirements, and
|
||||
// then uses your On-Demand and Spot allocation strategies to launch instances
|
||||
// from these instance types, in the same way as when you specify a list of
|
||||
// specific instance types.
|
||||
// The instance requirements. Amazon EC2 Auto Scaling uses your specified requirements
|
||||
// to identify instance types. Then, it uses your On-Demand and Spot allocation
|
||||
// strategies to launch instances from these instance types.
|
||||
//
|
||||
// You can specify up to four separate sets of instance requirements per Auto
|
||||
// Scaling group. This is useful for provisioning instances from different Amazon
|
||||
// Machine Images (AMIs) in the same Auto Scaling group. To do this, create
|
||||
// the AMIs and create a new launch template for each AMI. Then, create a compatible
|
||||
// set of instance requirements for each launch template.
|
||||
//
|
||||
// If you specify InstanceRequirements, you can't specify InstanceType.
|
||||
InstanceRequirements *InstanceRequirements `type:"structure"`
|
||||
|
||||
// The instance type, such as m3.xlarge. You must use an instance type that
|
||||
// The instance type, such as m3.xlarge. You must specify an instance type that
|
||||
// is supported in your requested Region and Availability Zones. For more information,
|
||||
// see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
|
||||
// in the Amazon Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// You can specify up to 40 instance types per Auto Scaling group.
|
||||
InstanceType *string `min:"1" type:"string"`
|
||||
|
||||
// Provides a launch template for the specified instance type or instance requirements.
|
||||
// For example, some instance types might require a launch template with a different
|
||||
// AMI. If not provided, Amazon EC2 Auto Scaling uses the launch template that's
|
||||
// defined for your mixed instances policy. For more information, see Specifying
|
||||
// a different launch template for an instance type (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-launch-template-overrides.html)
|
||||
// Provides a launch template for the specified instance type or set of instance
|
||||
// requirements. For example, some instance types might require a launch template
|
||||
// with a different AMI. If not provided, Amazon EC2 Auto Scaling uses the launch
|
||||
// template that's specified in the LaunchTemplate definition. For more information,
|
||||
// see Specifying a different launch template for an instance type (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-launch-template-overrides.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
//
|
||||
// You can specify up to 20 launch templates per Auto Scaling group. The launch
|
||||
// templates specified in the overrides and in the LaunchTemplate definition
|
||||
// count towards this limit.
|
||||
LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"`
|
||||
|
||||
// The number of capacity units provided by the instance type specified in InstanceType
|
||||
// in terms of virtual CPUs, memory, storage, throughput, or other relative
|
||||
// performance characteristic. When a Spot or On-Demand Instance is launched,
|
||||
// the capacity units count toward the desired capacity. Amazon EC2 Auto Scaling
|
||||
// launches instances until the desired capacity is totally fulfilled, even
|
||||
// if this results in an overage. For example, if there are two units remaining
|
||||
// to fulfill capacity, and Amazon EC2 Auto Scaling can only launch an instance
|
||||
// with a WeightedCapacity of five units, the instance is launched, and the
|
||||
// desired capacity is exceeded by three units. For more information, see Configuring
|
||||
// instance weighting for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html)
|
||||
// If you provide a list of instance types to use, you can specify the number
|
||||
// of capacity units provided by each instance type in terms of virtual CPUs,
|
||||
// memory, storage, throughput, or other relative performance characteristic.
|
||||
// When a Spot or On-Demand Instance is launched, the capacity units count toward
|
||||
// the desired capacity. Amazon EC2 Auto Scaling launches instances until the
|
||||
// desired capacity is totally fulfilled, even if this results in an overage.
|
||||
// For example, if there are two units remaining to fulfill capacity, and Amazon
|
||||
// EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five
|
||||
// units, the instance is launched, and the desired capacity is exceeded by
|
||||
// three units. For more information, see Configuring instance weighting for
|
||||
// Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide. Value must be in the range of
|
||||
// 1–999.
|
||||
//
|
||||
// If you specify a value for WeightedCapacity for one instance type, you must
|
||||
// specify a value for WeightedCapacity for all of them.
|
||||
//
|
||||
// Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize,
|
||||
// and MinSize). Usually, you set these sizes based on a specific number of
|
||||
// instances. However, if you configure a mixed instances policy that defines
|
||||
// weights for the instance types, you must specify these sizes with the same
|
||||
// units that you use for weighting instances.
|
||||
WeightedCapacity *string `min:"1" type:"string"`
|
||||
}
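// Editor's usage sketch (illustrative assumptions, not part of the vendored
// source): a LaunchTemplate with instance type overrides and weighted
// capacities, using only the Overrides, InstanceType, and WeightedCapacity
// fields documented above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	lt := &autoscaling.LaunchTemplate{
		Overrides: []*autoscaling.LaunchTemplateOverrides{
			{InstanceType: aws.String("m5.xlarge"), WeightedCapacity: aws.String("4")},
			{InstanceType: aws.String("m5.2xlarge"), WeightedCapacity: aws.String("8")},
		},
	}
	fmt.Println(lt)
}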
@ -15519,11 +15641,13 @@ func (s *MetricStat) SetUnit(v string) *MetricStat {
|
|||
return s
|
||||
}
|
||||
|
||||
// Describes a mixed instances policy. A mixed instances policy contains the
|
||||
// instance types that Amazon EC2 Auto Scaling can launch and other information
|
||||
// that Amazon EC2 Auto Scaling can use to launch instances and help optimize
|
||||
// your costs. For more information, see Auto Scaling groups with multiple instance
|
||||
// types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html)
|
||||
// Use this structure to launch multiple instance types and On-Demand Instances
|
||||
// and Spot Instances within a single Auto Scaling group.
|
||||
//
|
||||
// A mixed instances policy contains information that Amazon EC2 Auto Scaling
|
||||
// can use to launch instances and help optimize your costs. For more information,
|
||||
// see Auto Scaling groups with multiple instance types and purchase options
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
type MixedInstancesPolicy struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
|
@ -15581,6 +15705,55 @@ func (s *MixedInstancesPolicy) SetLaunchTemplate(v *LaunchTemplate) *MixedInstan
|
|||
return s
|
||||
}
|
||||
|
||||
// Specifies the minimum and maximum for the NetworkBandwidthGbps object when
|
||||
// you specify InstanceRequirements for an Auto Scaling group.
|
||||
//
|
||||
// Setting the minimum bandwidth does not guarantee that your instance will
|
||||
// achieve the minimum bandwidth. Amazon EC2 will identify instance types that
|
||||
// support the specified minimum bandwidth, but the actual bandwidth of your
|
||||
// instance might go below the specified minimum at times. For more information,
|
||||
// see Available instance bandwidth (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html#available-instance-bandwidth)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
type NetworkBandwidthGbpsRequest struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The maximum amount of network bandwidth, in gigabits per second (Gbps).
|
||||
Max *float64 `type:"double"`
|
||||
|
||||
// The minimum amount of network bandwidth, in gigabits per second (Gbps).
|
||||
Min *float64 `type:"double"`
|
||||
}
|
||||
|
||||
// String returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s NetworkBandwidthGbpsRequest) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s NetworkBandwidthGbpsRequest) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetMax sets the Max field's value.
|
||||
func (s *NetworkBandwidthGbpsRequest) SetMax(v float64) *NetworkBandwidthGbpsRequest {
|
||||
s.Max = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetMin sets the Min field's value.
|
||||
func (s *NetworkBandwidthGbpsRequest) SetMin(v float64) *NetworkBandwidthGbpsRequest {
|
||||
s.Min = &v
|
||||
return s
|
||||
}
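// Editor's usage sketch (assumed values, not part of the vendored source):
// expressing a 10-25 Gbps network bandwidth requirement with the SetMin and
// SetMax builders defined above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	bw := &autoscaling.NetworkBandwidthGbpsRequest{}
	bw.SetMin(10).SetMax(25)
	fmt.Println(bw)
}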
// Specifies the minimum and maximum for the NetworkInterfaceCount object when
|
||||
// you specify InstanceRequirements for an Auto Scaling group.
|
||||
type NetworkInterfaceCountRequest struct {
@ -19352,7 +19525,7 @@ type UpdateAutoScalingGroupInput struct {
|
|||
// and marking it unhealthy due to a failed Elastic Load Balancing or custom
|
||||
// health check. This is useful if your instances do not immediately pass these
|
||||
// health checks after they enter the InService state. For more information,
|
||||
// see Health check grace period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period)
|
||||
// see Set the health check grace period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
HealthCheckGracePeriod *int64 `type:"integer"`
|
||||
|
||||
|
|
@ -19391,9 +19564,8 @@ type UpdateAutoScalingGroupInput struct {
|
|||
// The minimum size of the Auto Scaling group.
|
||||
MinSize *int64 `type:"integer"`
|
||||
|
||||
// An embedded object that specifies a mixed instances policy. For more information,
|
||||
// see Auto Scaling groups with multiple instance types and purchase options
|
||||
// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html)
|
||||
// The mixed instances policy. For more information, see Auto Scaling groups
|
||||
// with multiple instance types and purchase options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups.html)
|
||||
// in the Amazon EC2 Auto Scaling User Guide.
|
||||
MixedInstancesPolicy *MixedInstancesPolicy `type:"structure"`
@ -3356,6 +3356,82 @@ func (c *EC2) CancelExportTaskWithContext(ctx aws.Context, input *CancelExportTa
|
|||
return out, req.Send()
|
||||
}
|
||||
|
||||
const opCancelImageLaunchPermission = "CancelImageLaunchPermission"
|
||||
|
||||
// CancelImageLaunchPermissionRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the CancelImageLaunchPermission operation. The "output" return
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfully.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
// the "output" return value is not valid until after Send returns without error.
|
||||
//
|
||||
// See CancelImageLaunchPermission for more information on using the CancelImageLaunchPermission
|
||||
// API call, and error handling.
|
||||
//
|
||||
// This method is useful when you want to inject custom logic or configuration
|
||||
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
||||
//
|
||||
// // Example sending a request using the CancelImageLaunchPermissionRequest method.
|
||||
// req, resp := client.CancelImageLaunchPermissionRequest(params)
|
||||
//
|
||||
// err := req.Send()
|
||||
// if err == nil { // resp is now filled
|
||||
// fmt.Println(resp)
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImageLaunchPermission
|
||||
func (c *EC2) CancelImageLaunchPermissionRequest(input *CancelImageLaunchPermissionInput) (req *request.Request, output *CancelImageLaunchPermissionOutput) {
|
||||
op := &request.Operation{
|
||||
Name: opCancelImageLaunchPermission,
|
||||
HTTPMethod: "POST",
|
||||
HTTPPath: "/",
|
||||
}
|
||||
|
||||
if input == nil {
|
||||
input = &CancelImageLaunchPermissionInput{}
|
||||
}
|
||||
|
||||
output = &CancelImageLaunchPermissionOutput{}
|
||||
req = c.newRequest(op, input, output)
|
||||
return
|
||||
}
|
||||
|
||||
// CancelImageLaunchPermission API operation for Amazon Elastic Compute Cloud.
|
||||
//
|
||||
// Removes your Amazon Web Services account from the launch permissions for
|
||||
// the specified AMI. For more information, see Cancel sharing an AMI with your
|
||||
// Amazon Web Services account (https://docs.aws.amazon.com/) in the Amazon
|
||||
// Elastic Compute Cloud User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
// the error.
|
||||
//
|
||||
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
|
||||
// API operation CancelImageLaunchPermission for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CancelImageLaunchPermission
|
||||
func (c *EC2) CancelImageLaunchPermission(input *CancelImageLaunchPermissionInput) (*CancelImageLaunchPermissionOutput, error) {
|
||||
req, out := c.CancelImageLaunchPermissionRequest(input)
|
||||
return out, req.Send()
|
||||
}
|
||||
|
||||
// CancelImageLaunchPermissionWithContext is the same as CancelImageLaunchPermission with the addition of
|
||||
// the ability to pass a context and additional request options.
|
||||
//
|
||||
// See CancelImageLaunchPermission for details on how to use this API operation.
|
||||
//
|
||||
// The context must be non-nil and will be used for request cancellation. If
|
||||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
func (c *EC2) CancelImageLaunchPermissionWithContext(ctx aws.Context, input *CancelImageLaunchPermissionInput, opts ...request.Option) (*CancelImageLaunchPermissionOutput, error) {
|
||||
req, out := c.CancelImageLaunchPermissionRequest(input)
|
||||
req.SetContext(ctx)
|
||||
req.ApplyOptions(opts...)
|
||||
return out, req.Send()
|
||||
}
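// Editor's usage sketch (the AMI ID and session setup are assumptions, not
// part of the vendored source): cancelling a launch permission that another
// account shared with you, using the context-aware variant defined above.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	out, err := svc.CancelImageLaunchPermissionWithContext(context.Background(),
		&ec2.CancelImageLaunchPermissionInput{ImageId: aws.String("ami-0123456789abcdef0")})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("return: %t", aws.BoolValue(out.Return))
}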
const opCancelImportTask = "CancelImportTask"
|
||||
|
||||
// CancelImportTaskRequest generates a "aws/request.Request" representing the
|
||||
|
|
@ -27432,7 +27508,7 @@ func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetReq
|
|||
// recorded event. Spot Fleet events are available for 48 hours.
|
||||
//
|
||||
// For more information, see Monitor fleet events using Amazon EventBridge (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/fleet-monitor.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
// in the Amazon EC2 User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
|
@ -47157,12 +47233,12 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques
|
|||
// only the spot-fleet-request and instance resource types are supported.
|
||||
//
|
||||
// For more information, see Spot Fleet requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
// in the Amazon EC2 User Guide.
|
||||
//
|
||||
// We strongly discourage using the RequestSpotFleet API because it is a legacy
|
||||
// API with no planned investment. For options for requesting Spot Instances,
|
||||
// see Which is the best Spot request method to use? (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-best-practices.html#which-spot-request-method-to-use)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
// in the Amazon EC2 User Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
|
|
@ -57662,6 +57738,95 @@ func (s CancelExportTaskOutput) GoString() string {
|
|||
return s.String()
|
||||
}
|
||||
|
||||
type CancelImageLaunchPermissionInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Checks whether you have the required permissions for the action, without
|
||||
// actually making the request, and provides an error response. If you have
|
||||
// the required permissions, the error response is DryRunOperation. Otherwise,
|
||||
// it is UnauthorizedOperation.
|
||||
DryRun *bool `type:"boolean"`
|
||||
|
||||
// The ID of the AMI that was shared with your Amazon Web Services account.
|
||||
//
|
||||
// ImageId is a required field
|
||||
ImageId *string `type:"string" required:"true"`
|
||||
}
|
||||
|
||||
// String returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s CancelImageLaunchPermissionInput) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s CancelImageLaunchPermissionInput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *CancelImageLaunchPermissionInput) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "CancelImageLaunchPermissionInput"}
|
||||
if s.ImageId == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("ImageId"))
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDryRun sets the DryRun field's value.
|
||||
func (s *CancelImageLaunchPermissionInput) SetDryRun(v bool) *CancelImageLaunchPermissionInput {
|
||||
s.DryRun = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetImageId sets the ImageId field's value.
|
||||
func (s *CancelImageLaunchPermissionInput) SetImageId(v string) *CancelImageLaunchPermissionInput {
|
||||
s.ImageId = &v
|
||||
return s
|
||||
}
|
||||
|
||||
type CancelImageLaunchPermissionOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Returns true if the request succeeds; otherwise, it returns an error.
|
||||
Return *bool `locationName:"return" type:"boolean"`
|
||||
}
|
||||
|
||||
// String returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s CancelImageLaunchPermissionOutput) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s CancelImageLaunchPermissionOutput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetReturn sets the Return field's value.
|
||||
func (s *CancelImageLaunchPermissionOutput) SetReturn(v bool) *CancelImageLaunchPermissionOutput {
|
||||
s.Return = &v
|
||||
return s
|
||||
}
|
||||
|
||||
type CancelImportTaskInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
|
@ -67938,7 +68103,7 @@ func (s *CreatePlacementGroupInput) SetTagSpecifications(v []*TagSpecification)
|
|||
type CreatePlacementGroupOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Describes a placement group.
|
||||
// Information about the placement group.
|
||||
PlacementGroup *PlacementGroup `locationName:"placementGroup" type:"structure"`
|
||||
}
|
||||
|
||||
|
|
@ -105507,7 +105672,8 @@ type EnableImageDeprecationInput struct {
|
|||
// the seconds to the nearest minute.
|
||||
//
|
||||
// You can’t specify a date in the past. The upper limit for DeprecateAt is
|
||||
// 10 years from now.
|
||||
// 10 years from now, except for public AMIs, where the upper limit is 2 years
|
||||
// from the creation date.
|
||||
//
|
||||
// DeprecateAt is a required field
|
||||
DeprecateAt *time.Time `type:"timestamp" required:"true"`
|
||||
|
|
@ -109037,7 +109203,7 @@ func (s *FleetLaunchTemplateOverridesRequest) SetWeightedCapacity(v float64) *Fl
|
|||
//
|
||||
// For information about launch templates, see Launch an instance from a launch
|
||||
// template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
// in the Amazon EC2 User Guide.
|
||||
type FleetLaunchTemplateSpecification struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
|
@ -120611,6 +120777,16 @@ func (s *InstancePrivateIpAddress) SetPrivateIpAddress(v string) *InstancePrivat
|
|||
// all of the specified attributes. If you specify multiple values for an attribute,
|
||||
// you get instance types that satisfy any of the specified values.
|
||||
//
|
||||
// To limit the list of instance types from which Amazon EC2 can identify matching
|
||||
// instance types, you can use one of the following parameters, but not both
|
||||
// in the same request:
|
||||
//
|
||||
// - AllowedInstanceTypes - The instance types to include in the list. All
|
||||
// other instance types are ignored, even if they match your specified attributes.
|
||||
//
|
||||
// - ExcludedInstanceTypes - The instance types to exclude from the list,
|
||||
// even if they match your specified attributes.
|
||||
//
|
||||
// You must specify VCpuCount and MemoryMiB. All other attributes are optional.
|
||||
// Any unspecified optional attribute is set to its default.
|
||||
//
|
||||
|
|
@ -120683,6 +120859,23 @@ type InstanceRequirements struct {
|
|||
// Default: Any accelerator type
|
||||
AcceleratorTypes []*string `locationName:"acceleratorTypeSet" locationNameList:"item" type:"list" enum:"AcceleratorType"`
|
||||
|
||||
// The instance types to apply your specified attributes against. All other
|
||||
// instance types are ignored, even if they match your specified attributes.
|
||||
//
|
||||
// You can use strings with one or more wild cards, represented by an asterisk
|
||||
// (*), to allow an instance type, size, or generation. The following are examples:
|
||||
// m5.8xlarge, c5*.*, m5a.*, r*, *3*.
|
||||
//
|
||||
// For example, if you specify c5*, Amazon EC2 will allow the entire C5 instance
|
||||
// family, which includes all C5a and C5n instance types. If you specify m5a.*,
|
||||
// Amazon EC2 will allow all the M5a instance types, but not the M5n instance
|
||||
// types.
|
||||
//
|
||||
// If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes.
|
||||
//
|
||||
// Default: All instance types
|
||||
AllowedInstanceTypes []*string `locationName:"allowedInstanceTypeSet" locationNameList:"item" type:"list"`
|
||||
|
||||
// Indicates whether bare metal instance types must be included, excluded, or
|
||||
// required.
|
||||
//
|
||||
|
|
@ -120740,6 +120933,8 @@ type InstanceRequirements struct {
|
|||
// Amazon EC2 will exclude all the M5a instance types, but not the M5n instance
|
||||
// types.
|
||||
//
|
||||
// If you specify ExcludedInstanceTypes, you can't specify AllowedInstanceTypes.
|
||||
//
|
||||
// Default: No excluded instance types
|
||||
ExcludedInstanceTypes []*string `locationName:"excludedInstanceTypeSet" locationNameList:"item" type:"list"`
|
||||
|
||||
|
|
@ -120787,6 +120982,12 @@ type InstanceRequirements struct {
|
|||
// The minimum and maximum amount of memory, in MiB.
|
||||
MemoryMiB *MemoryMiB `locationName:"memoryMiB" type:"structure"`
|
||||
|
||||
// The minimum and maximum amount of network bandwidth, in gigabits per second
|
||||
// (Gbps).
|
||||
//
|
||||
// Default: No minimum or maximum limits
|
||||
NetworkBandwidthGbps *NetworkBandwidthGbps `locationName:"networkBandwidthGbps" type:"structure"`
|
||||
|
||||
// The minimum and maximum number of network interfaces.
|
||||
//
|
||||
// Default: No minimum or maximum limits
|
||||
|
|
@ -120896,6 +121097,12 @@ func (s *InstanceRequirements) SetAcceleratorTypes(v []*string) *InstanceRequire
|
|||
return s
|
||||
}
|
||||
|
||||
// SetAllowedInstanceTypes sets the AllowedInstanceTypes field's value.
|
||||
func (s *InstanceRequirements) SetAllowedInstanceTypes(v []*string) *InstanceRequirements {
|
||||
s.AllowedInstanceTypes = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetBareMetal sets the BareMetal field's value.
|
||||
func (s *InstanceRequirements) SetBareMetal(v string) *InstanceRequirements {
|
||||
s.BareMetal = &v
|
||||
|
|
@ -120956,6 +121163,12 @@ func (s *InstanceRequirements) SetMemoryMiB(v *MemoryMiB) *InstanceRequirements
|
|||
return s
|
||||
}
|
||||
|
||||
// SetNetworkBandwidthGbps sets the NetworkBandwidthGbps field's value.
|
||||
func (s *InstanceRequirements) SetNetworkBandwidthGbps(v *NetworkBandwidthGbps) *InstanceRequirements {
|
||||
s.NetworkBandwidthGbps = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetNetworkInterfaceCount sets the NetworkInterfaceCount field's value.
|
||||
func (s *InstanceRequirements) SetNetworkInterfaceCount(v *NetworkInterfaceCount) *InstanceRequirements {
|
||||
s.NetworkInterfaceCount = v
|
||||
|
|
@ -120999,6 +121212,16 @@ func (s *InstanceRequirements) SetVCpuCount(v *VCpuCountRange) *InstanceRequirem
|
|||
// all of the specified attributes. If you specify multiple values for an attribute,
|
||||
// you get instance types that satisfy any of the specified values.
|
||||
//
|
||||
// To limit the list of instance types from which Amazon EC2 can identify matching
|
||||
// instance types, you can use one of the following parameters, but not both
|
||||
// in the same request:
|
||||
//
|
||||
// - AllowedInstanceTypes - The instance types to include in the list. All
|
||||
// other instance types are ignored, even if they match your specified attributes.
|
||||
//
|
||||
// - ExcludedInstanceTypes - The instance types to exclude from the list,
|
||||
// even if they match your specified attributes.
|
||||
//
|
||||
// You must specify VCpuCount and MemoryMiB. All other attributes are optional.
|
||||
// Any unspecified optional attribute is set to its default.
|
||||
//
|
||||
|
|
@ -121071,6 +121294,23 @@ type InstanceRequirementsRequest struct {
|
|||
// Default: Any accelerator type
|
||||
AcceleratorTypes []*string `locationName:"AcceleratorType" locationNameList:"item" type:"list" enum:"AcceleratorType"`
|
||||
|
||||
// The instance types to apply your specified attributes against. All other
|
||||
// instance types are ignored, even if they match your specified attributes.
|
||||
//
|
||||
// You can use strings with one or more wild cards, represented by an asterisk
|
||||
// (*), to allow an instance type, size, or generation. The following are examples:
|
||||
// m5.8xlarge, c5*.*, m5a.*, r*, *3*.
|
||||
//
|
||||
// For example, if you specify c5*, Amazon EC2 will allow the entire C5 instance
|
||||
// family, which includes all C5a and C5n instance types. If you specify m5a.*,
|
||||
// Amazon EC2 will allow all the M5a instance types, but not the M5n instance
|
||||
// types.
|
||||
//
|
||||
// If you specify AllowedInstanceTypes, you can't specify ExcludedInstanceTypes.
|
||||
//
|
||||
// Default: All instance types
|
||||
AllowedInstanceTypes []*string `locationName:"AllowedInstanceType" locationNameList:"item" type:"list"`
|
||||
|
||||
// Indicates whether bare metal instance types must be included, excluded, or
|
||||
// required.
|
||||
//
|
||||
|
|
@ -121128,6 +121368,8 @@ type InstanceRequirementsRequest struct {
|
|||
// Amazon EC2 will exclude all the M5a instance types, but not the M5n instance
|
||||
// types.
|
||||
//
|
||||
// If you specify ExcludedInstanceTypes, you can't specify AllowedInstanceTypes.
|
||||
//
|
||||
// Default: No excluded instance types
|
||||
ExcludedInstanceTypes []*string `locationName:"ExcludedInstanceType" locationNameList:"item" type:"list"`
|
||||
|
||||
|
|
@ -121177,6 +121419,12 @@ type InstanceRequirementsRequest struct {
|
|||
// MemoryMiB is a required field
|
||||
MemoryMiB *MemoryMiBRequest `type:"structure" required:"true"`
|
||||
|
||||
// The minimum and maximum amount of network bandwidth, in gigabits per second
|
||||
// (Gbps).
|
||||
//
|
||||
// Default: No minimum or maximum limits
|
||||
NetworkBandwidthGbps *NetworkBandwidthGbpsRequest `type:"structure"`
|
||||
|
||||
// The minimum and maximum number of network interfaces.
|
||||
//
|
||||
// Default: No minimum or maximum limits
|
||||
|
|
@ -121314,6 +121562,12 @@ func (s *InstanceRequirementsRequest) SetAcceleratorTypes(v []*string) *Instance
|
|||
return s
|
||||
}
|
||||
|
||||
// SetAllowedInstanceTypes sets the AllowedInstanceTypes field's value.
|
||||
func (s *InstanceRequirementsRequest) SetAllowedInstanceTypes(v []*string) *InstanceRequirementsRequest {
|
||||
s.AllowedInstanceTypes = v
|
||||
return s
|
||||
}
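// Editor's usage sketch (the VCpuCountRangeRequest and MemoryMiBRequest types
// and all values are assumptions not shown in this diff): attribute-based
// instance type selection combining the required vCPU and memory ranges with
// the new AllowedInstanceTypes wildcards and NetworkBandwidthGbps range.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	req := &ec2.InstanceRequirementsRequest{
		VCpuCount:            &ec2.VCpuCountRangeRequest{Min: aws.Int64(2)},
		MemoryMiB:            &ec2.MemoryMiBRequest{Min: aws.Int64(4096)},
		AllowedInstanceTypes: aws.StringSlice([]string{"m5.*", "c5*.*"}),
		NetworkBandwidthGbps: &ec2.NetworkBandwidthGbpsRequest{Min: aws.Float64(10)},
	}
	fmt.Println(req)
}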
// SetBareMetal sets the BareMetal field's value.
|
||||
func (s *InstanceRequirementsRequest) SetBareMetal(v string) *InstanceRequirementsRequest {
|
||||
s.BareMetal = &v
|
||||
|
|
@ -121374,6 +121628,12 @@ func (s *InstanceRequirementsRequest) SetMemoryMiB(v *MemoryMiBRequest) *Instanc
|
|||
return s
|
||||
}
|
||||
|
||||
// SetNetworkBandwidthGbps sets the NetworkBandwidthGbps field's value.
|
||||
func (s *InstanceRequirementsRequest) SetNetworkBandwidthGbps(v *NetworkBandwidthGbpsRequest) *InstanceRequirementsRequest {
|
||||
s.NetworkBandwidthGbps = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetNetworkInterfaceCount sets the NetworkInterfaceCount field's value.
|
||||
func (s *InstanceRequirementsRequest) SetNetworkInterfaceCount(v *NetworkInterfaceCountRequest) *InstanceRequirementsRequest {
|
||||
s.NetworkInterfaceCount = v
|
||||
|
|
@ -126640,6 +126900,10 @@ type LaunchTemplatePlacement struct {
|
|||
// The Availability Zone of the instance.
|
||||
AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
|
||||
|
||||
// The Group ID of the placement group. You must specify the Placement Group
|
||||
// Group ID to launch an instance in a shared placement group.
|
||||
GroupId *string `locationName:"groupId" type:"string"`
|
||||
|
||||
// The name of the placement group for the instance.
|
||||
GroupName *string `locationName:"groupName" type:"string"`
|
||||
|
||||
|
|
@ -126691,6 +126955,12 @@ func (s *LaunchTemplatePlacement) SetAvailabilityZone(v string) *LaunchTemplateP
|
|||
return s
|
||||
}
|
||||
|
||||
// SetGroupId sets the GroupId field's value.
|
||||
func (s *LaunchTemplatePlacement) SetGroupId(v string) *LaunchTemplatePlacement {
|
||||
s.GroupId = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetGroupName sets the GroupName field's value.
|
||||
func (s *LaunchTemplatePlacement) SetGroupName(v string) *LaunchTemplatePlacement {
|
||||
s.GroupName = &v
|
||||
|
|
@ -126737,6 +127007,10 @@ type LaunchTemplatePlacementRequest struct {
|
|||
// The Availability Zone for the instance.
|
||||
AvailabilityZone *string `type:"string"`
|
||||
|
||||
// The Group Id of a placement group. You must specify the Placement Group Group
|
||||
// Id to launch an instance in a shared placement group.
|
||||
GroupId *string `type:"string"`
|
||||
|
||||
// The name of the placement group for the instance.
|
||||
GroupName *string `type:"string"`
|
||||
|
||||
|
|
@ -126790,6 +127064,12 @@ func (s *LaunchTemplatePlacementRequest) SetAvailabilityZone(v string) *LaunchTe
|
|||
return s
|
||||
}
|
||||
|
||||
// SetGroupId sets the GroupId field's value.
|
||||
func (s *LaunchTemplatePlacementRequest) SetGroupId(v string) *LaunchTemplatePlacementRequest {
|
||||
s.GroupId = &v
|
||||
return s
|
||||
}
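// Editor's usage sketch (the group ID is an assumption, not part of the
// vendored source): targeting a shared placement group by its Group ID via the
// new GroupId field on a launch template placement request.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	placement := &ec2.LaunchTemplatePlacementRequest{
		GroupId: aws.String("pg-0123456789abcdef0"),
	}
	fmt.Println(placement)
}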
// SetGroupName sets the GroupName field's value.
|
||||
func (s *LaunchTemplatePlacementRequest) SetGroupName(v string) *LaunchTemplatePlacementRequest {
|
||||
s.GroupName = &v
|
||||
|
|
@ -131199,7 +131479,7 @@ func (s *ModifyInstanceEventStartTimeInput) SetNotBefore(v time.Time) *ModifyIns
|
|||
type ModifyInstanceEventStartTimeOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Describes a scheduled event for an instance.
|
||||
// Information about the event.
|
||||
Event *InstanceStatusEvent `locationName:"event" type:"structure"`
|
||||
}
|
||||
|
||||
|
|
@ -131644,6 +131924,10 @@ type ModifyInstancePlacementInput struct {
|
|||
// The affinity setting for the instance.
|
||||
Affinity *string `locationName:"affinity" type:"string" enum:"Affinity"`
|
||||
|
||||
// The Group Id of a placement group. You must specify the Placement Group Group
|
||||
// Id to launch an instance in a shared placement group.
|
||||
GroupId *string `type:"string"`
|
||||
|
||||
// The name of the placement group in which to place the instance. For spread
|
||||
// placement groups, the instance must have a tenancy of default. For cluster
|
||||
// and partition placement groups, the instance must have a tenancy of default
|
||||
|
|
@ -131712,6 +131996,12 @@ func (s *ModifyInstancePlacementInput) SetAffinity(v string) *ModifyInstancePlac
|
|||
return s
|
||||
}
|
||||
|
||||
// SetGroupId sets the GroupId field's value.
|
||||
func (s *ModifyInstancePlacementInput) SetGroupId(v string) *ModifyInstancePlacementInput {
|
||||
s.GroupId = &v
|
||||
return s
|
||||
}
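// Editor's usage sketch (the instance and group IDs, and the
// ModifyInstancePlacement call itself, are assumptions not shown in this
// hunk): moving a stopped instance into a shared placement group by Group ID.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	if _, err := svc.ModifyInstancePlacement(&ec2.ModifyInstancePlacementInput{
		InstanceId: aws.String("i-0123456789abcdef0"),
		GroupId:    aws.String("pg-0123456789abcdef0"),
	}); err != nil {
		log.Fatal(err)
	}
}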
// SetGroupName sets the GroupName field's value.
|
||||
func (s *ModifyInstancePlacementInput) SetGroupName(v string) *ModifyInstancePlacementInput {
|
||||
s.GroupName = &v
|
||||
|
|
@ -137603,6 +137893,108 @@ func (s *NetworkAclEntry) SetRuleNumber(v int64) *NetworkAclEntry {
|
|||
return s
|
||||
}
|
||||
|
||||
// The minimum and maximum amount of network bandwidth, in gigabits per second
|
||||
// (Gbps).
|
||||
//
|
||||
// Setting the minimum bandwidth does not guarantee that your instance will
|
||||
// achieve the minimum bandwidth. Amazon EC2 will identify instance types that
|
||||
// support the specified minimum bandwidth, but the actual bandwidth of your
|
||||
// instance might go below the specified minimum at times. For more information,
|
||||
// see Available instance bandwidth (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html#available-instance-bandwidth)
|
||||
// in the Amazon EC2 User Guide.
|
||||
type NetworkBandwidthGbps struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The maximum amount of network bandwidth, in Gbps. If this parameter is not
|
||||
// specified, there is no maximum limit.
|
||||
Max *float64 `locationName:"max" type:"double"`
|
||||
|
||||
// The minimum amount of network bandwidth, in Gbps. If this parameter is not
|
||||
// specified, there is no minimum limit.
|
||||
Min *float64 `locationName:"min" type:"double"`
|
||||
}
|
||||
|
||||
// String returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s NetworkBandwidthGbps) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s NetworkBandwidthGbps) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetMax sets the Max field's value.
|
||||
func (s *NetworkBandwidthGbps) SetMax(v float64) *NetworkBandwidthGbps {
|
||||
s.Max = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetMin sets the Min field's value.
|
||||
func (s *NetworkBandwidthGbps) SetMin(v float64) *NetworkBandwidthGbps {
|
||||
s.Min = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// The minimum and maximum amount of network bandwidth, in gigabits per second
|
||||
// (Gbps).
|
||||
//
|
||||
// Setting the minimum bandwidth does not guarantee that your instance will
|
||||
// achieve the minimum bandwidth. Amazon EC2 will identify instance types that
|
||||
// support the specified minimum bandwidth, but the actual bandwidth of your
|
||||
// instance might go below the specified minimum at times. For more information,
|
||||
// see Available instance bandwidth (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html#available-instance-bandwidth)
|
||||
// in the Amazon EC2 User Guide.
|
||||
type NetworkBandwidthGbpsRequest struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The maximum amount of network bandwidth, in Gbps. To specify no maximum limit,
|
||||
// omit this parameter.
|
||||
Max *float64 `type:"double"`
|
||||
|
||||
// The minimum amount of network bandwidth, in Gbps. To specify no minimum limit,
|
||||
// omit this parameter.
|
||||
Min *float64 `type:"double"`
|
||||
}
|
||||
|
||||
// String returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s NetworkBandwidthGbpsRequest) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation.
|
||||
//
|
||||
// API parameter values that are decorated as "sensitive" in the API will not
|
||||
// be included in the string output. The member name will be present, but the
|
||||
// value will be replaced with "sensitive".
|
||||
func (s NetworkBandwidthGbpsRequest) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetMax sets the Max field's value.
|
||||
func (s *NetworkBandwidthGbpsRequest) SetMax(v float64) *NetworkBandwidthGbpsRequest {
|
||||
s.Max = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetMin sets the Min field's value.
|
||||
func (s *NetworkBandwidthGbpsRequest) SetMin(v float64) *NetworkBandwidthGbpsRequest {
|
||||
s.Min = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// Describes the network card support of the instance type.
|
||||
type NetworkCardInfo struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
|
@ -140430,6 +140822,9 @@ type Placement struct {
|
|||
// This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet).
|
||||
AvailabilityZone *string `locationName:"availabilityZone" type:"string"`
|
||||
|
||||
// The Group Id of the placement group.
|
||||
GroupId *string `locationName:"groupId" type:"string"`
|
||||
|
||||
// The name of the placement group the instance is in.
|
||||
GroupName *string `locationName:"groupName" type:"string"`
|
||||
|
||||
|
|
@ -140500,6 +140895,12 @@ func (s *Placement) SetAvailabilityZone(v string) *Placement {
|
|||
return s
|
||||
}
|
||||
|
||||
// SetGroupId sets the GroupId field's value.
|
||||
func (s *Placement) SetGroupId(v string) *Placement {
|
||||
s.GroupId = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetGroupName sets the GroupName field's value.
|
||||
func (s *Placement) SetGroupName(v string) *Placement {
|
||||
s.GroupName = &v
|
||||
|
|
@ -155008,29 +155409,46 @@ type SpotFleetRequestConfigData struct {
|
|||
// The strategy that determines how to allocate the target Spot Instance capacity
|
||||
// across the Spot Instance pools specified by the Spot Fleet launch configuration.
|
||||
// For more information, see Allocation strategies for Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-allocation-strategy.html)
|
||||
// in the Amazon EC2 User Guide for Linux Instances.
|
||||
// in the Amazon EC2 User Guide.
|
||||
//
|
||||
// lowestPrice - Spot Fleet launches instances from the lowest-price Spot Instance
|
||||
// pool that has available capacity. If the cheapest pool doesn't have available
|
||||
// capacity, the Spot Instances come from the next cheapest pool that has available
|
||||
// capacity. If a pool runs out of capacity before fulfilling your desired capacity,
|
||||
// Spot Fleet will continue to fulfill your request by drawing from the next
|
||||
// cheapest pool. To ensure that your desired capacity is met, you might receive
|
||||
// Spot Instances from several pools.
|
||||
// priceCapacityOptimized (recommended)
|
||||
//
|
||||
// diversified - Spot Fleet launches instances from all of the Spot Instance
|
||||
// pools that you specify.
|
||||
// Spot Fleet identifies the pools with the highest capacity availability for
|
||||
// the number of instances that are launching. This means that we will request
|
||||
// Spot Instances from the pools that we believe have the lowest chance of interruption
|
||||
// in the near term. Spot Fleet then requests Spot Instances from the lowest
|
||||
// priced of these pools.
|
||||
//
|
||||
// capacityOptimized (recommended) - Spot Fleet launches instances from Spot
|
||||
// Instance pools with optimal capacity for the number of instances that are
|
||||
// launching. To give certain instance types a higher chance of launching first,
|
||||
// use capacityOptimizedPrioritized. Set a priority for each instance type by
|
||||
// using the Priority parameter for LaunchTemplateOverrides. You can assign
|
||||
// the same priority to different LaunchTemplateOverrides. EC2 implements the
|
||||
// priorities on a best-effort basis, but optimizes for capacity first. capacityOptimizedPrioritized
|
||||
// is supported only if your Spot Fleet uses a launch template. Note that if
|
||||
// the OnDemandAllocationStrategy is set to prioritized, the same priority is
|
||||
// applied when fulfilling On-Demand capacity.
|
||||
// capacityOptimized
|
||||
//
|
||||
// Spot Fleet identifies the pools with the highest capacity availability for
|
||||
// the number of instances that are launching. This means that we will request
|
||||
// Spot Instances from the pools that we believe have the lowest chance of interruption
|
||||
// in the near term. To give certain instance types a higher chance of launching
|
||||
// first, use capacityOptimizedPrioritized. Set a priority for each instance
|
||||
// type by using the Priority parameter for LaunchTemplateOverrides. You can
|
||||
// assign the same priority to different LaunchTemplateOverrides. EC2 implements
|
||||
// the priorities on a best-effort basis, but optimizes for capacity first.
|
||||
// capacityOptimizedPrioritized is supported only if your Spot Fleet uses a
|
||||
// launch template. Note that if the OnDemandAllocationStrategy is set to prioritized,
|
||||
// the same priority is applied when fulfilling On-Demand capacity.
|
||||
//
|
||||
// diversified
|
||||
//
|
||||
// Spot Fleet requests instances from all of the Spot Instance pools that you
|
||||
// specify.
|
||||
//
|
||||
// lowestPrice
|
||||
//
|
||||
// Spot Fleet requests instances from the lowest priced Spot Instance pool that
|
||||
// has available capacity. If the lowest priced pool doesn't have available
|
||||
// capacity, the Spot Instances come from the next lowest priced pool that has
|
||||
// available capacity. If a pool runs out of capacity before fulfilling your
|
||||
// desired capacity, Spot Fleet will continue to fulfill your request by drawing
|
||||
// from the next lowest priced pool. To ensure that your desired capacity is
|
||||
// met, you might receive Spot Instances from several pools. Because this strategy
|
||||
// only considers instance price and not capacity availability, it might lead
|
||||
// to high interruption rates.
|
||||
//
|
||||
// Default: lowestPrice
|
||||
AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"`
|
||||
|
|
@ -155056,9 +155474,9 @@ type SpotFleetRequestConfigData struct {
|
|||
// role that grants the Spot Fleet the permission to request, launch, terminate,
|
||||
// and tag instances on your behalf. For more information, see Spot Fleet prerequisites
|
||||
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites)
|
||||
// in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate
|
||||
// Spot Instances on your behalf when you cancel its Spot Fleet request using
|
||||
// CancelSpotFleetRequests (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests)
|
||||
// in the Amazon EC2 User Guide. Spot Fleet can terminate Spot Instances on
|
||||
// your behalf when you cancel its Spot Fleet request using CancelSpotFleetRequests
|
||||
// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests)
|
||||
// or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration.
|
||||
//
|
||||
// IamFleetRole is a required field
|
||||
|
|
@ -155894,27 +156312,44 @@ type SpotOptions struct {
|
|||
// For more information, see Allocation strategies for Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-allocation-strategy.html)
|
||||
// in the Amazon EC2 User Guide.
|
||||
//
|
||||
// lowest-price - EC2 Fleet launches instances from the lowest-price Spot Instance
|
||||
// pool that has available capacity. If the cheapest pool doesn't have available
|
||||
// capacity, the Spot Instances come from the next cheapest pool that has available
|
||||
// capacity. If a pool runs out of capacity before fulfilling your desired capacity,
|
||||
// EC2 Fleet will continue to fulfill your request by drawing from the next
|
||||
// cheapest pool. To ensure that your desired capacity is met, you might receive
|
||||
// Spot Instances from several pools.
|
||||
// price-capacity-optimized (recommended)
|
||||
//
|
||||
// diversified - EC2 Fleet launches instances from all of the Spot Instance
|
||||
// pools that you specify.
|
||||
// EC2 Fleet identifies the pools with the highest capacity availability for
|
||||
// the number of instances that are launching. This means that we will request
|
||||
// Spot Instances from the pools that we believe have the lowest chance of interruption
|
||||
// in the near term. EC2 Fleet then requests Spot Instances from the lowest
|
||||
// priced of these pools.
|
||||
//
|
||||
// capacity-optimized (recommended) - EC2 Fleet launches instances from Spot
|
||||
// Instance pools with optimal capacity for the number of instances that are
|
||||
// launching. To give certain instance types a higher chance of launching first,
|
||||
// use capacity-optimized-prioritized. Set a priority for each instance type
|
||||
// by using the Priority parameter for LaunchTemplateOverrides. You can assign
|
||||
// the same priority to different LaunchTemplateOverrides. EC2 implements the
|
||||
// priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized
|
||||
// is supported only if your fleet uses a launch template. Note that if the
|
||||
// On-Demand AllocationStrategy is set to prioritized, the same priority is
|
||||
// applied when fulfilling On-Demand capacity.
|
||||
// capacity-optimized
|
||||
//
|
||||
// EC2 Fleet identifies the pools with the highest capacity availability for
|
||||
// the number of instances that are launching. This means that we will request
|
||||
// Spot Instances from the pools that we believe have the lowest chance of interruption
|
||||
// in the near term. To give certain instance types a higher chance of launching
|
||||
// first, use capacity-optimized-prioritized. Set a priority for each instance
|
||||
// type by using the Priority parameter for LaunchTemplateOverrides. You can
|
||||
// assign the same priority to different LaunchTemplateOverrides. EC2 implements
|
||||
// the priorities on a best-effort basis, but optimizes for capacity first.
|
||||
// capacity-optimized-prioritized is supported only if your EC2 Fleet uses a
|
||||
// launch template. Note that if the On-Demand AllocationStrategy is set to
|
||||
// prioritized, the same priority is applied when fulfilling On-Demand capacity.
|
||||
//
|
||||
// diversified
|
||||
//
|
||||
// EC2 Fleet requests instances from all of the Spot Instance pools that you
|
||||
// specify.
|
||||
//
|
||||
// lowest-price
|
||||
//
|
||||
// EC2 Fleet requests instances from the lowest priced Spot Instance pool that
|
||||
// has available capacity. If the lowest priced pool doesn't have available
|
||||
// capacity, the Spot Instances come from the next lowest priced pool that has
|
||||
// available capacity. If a pool runs out of capacity before fulfilling your
|
||||
// desired capacity, EC2 Fleet will continue to fulfill your request by drawing
|
||||
// from the next lowest priced pool. To ensure that your desired capacity is
|
||||
// met, you might receive Spot Instances from several pools. Because this strategy
|
||||
// only considers instance price and not capacity availability, it might lead
|
||||
// to high interruption rates.
|
||||
//
|
||||
// Default: lowest-price
|
||||
AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"`
|
||||
|
|
@ -156049,27 +156484,44 @@ type SpotOptionsRequest struct {
|
|||
// For more information, see Allocation strategies for Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-allocation-strategy.html)
|
||||
// in the Amazon EC2 User Guide.
|
||||
//
|
||||
// lowest-price - EC2 Fleet launches instances from the lowest-price Spot Instance
|
||||
// pool that has available capacity. If the cheapest pool doesn't have available
|
||||
// capacity, the Spot Instances come from the next cheapest pool that has available
|
||||
// capacity. If a pool runs out of capacity before fulfilling your desired capacity,
|
||||
// EC2 Fleet will continue to fulfill your request by drawing from the next
|
||||
// cheapest pool. To ensure that your desired capacity is met, you might receive
|
||||
// Spot Instances from several pools.
|
||||
// price-capacity-optimized (recommended)
|
||||
//
|
||||
// diversified - EC2 Fleet launches instances from all of the Spot Instance
|
||||
// pools that you specify.
|
||||
// EC2 Fleet identifies the pools with the highest capacity availability for
|
||||
// the number of instances that are launching. This means that we will request
|
||||
// Spot Instances from the pools that we believe have the lowest chance of interruption
|
||||
// in the near term. EC2 Fleet then requests Spot Instances from the lowest
|
||||
// priced of these pools.
|
||||
//
|
||||
// capacity-optimized (recommended) - EC2 Fleet launches instances from Spot
|
||||
// Instance pools with optimal capacity for the number of instances that are
|
||||
// launching. To give certain instance types a higher chance of launching first,
|
||||
// use capacity-optimized-prioritized. Set a priority for each instance type
|
||||
// by using the Priority parameter for LaunchTemplateOverrides. You can assign
|
||||
// the same priority to different LaunchTemplateOverrides. EC2 implements the
|
||||
// priorities on a best-effort basis, but optimizes for capacity first. capacity-optimized-prioritized
|
||||
// is supported only if your fleet uses a launch template. Note that if the
|
||||
// On-Demand AllocationStrategy is set to prioritized, the same priority is
|
||||
// applied when fulfilling On-Demand capacity.
|
||||
// capacity-optimized
|
||||
//
|
||||
// EC2 Fleet identifies the pools with the highest capacity availability for
|
||||
// the number of instances that are launching. This means that we will request
|
||||
// Spot Instances from the pools that we believe have the lowest chance of interruption
|
||||
// in the near term. To give certain instance types a higher chance of launching
|
||||
// first, use capacity-optimized-prioritized. Set a priority for each instance
|
||||
// type by using the Priority parameter for LaunchTemplateOverrides. You can
|
||||
// assign the same priority to different LaunchTemplateOverrides. EC2 implements
|
||||
// the priorities on a best-effort basis, but optimizes for capacity first.
|
||||
// capacity-optimized-prioritized is supported only if your EC2 Fleet uses a
|
||||
// launch template. Note that if the On-Demand AllocationStrategy is set to
|
||||
// prioritized, the same priority is applied when fulfilling On-Demand capacity.
|
||||
//
|
||||
// diversified
|
||||
//
|
||||
// EC2 Fleet requests instances from all of the Spot Instance pools that you
|
||||
// specify.
|
||||
//
|
||||
// lowest-price
|
||||
//
|
||||
// EC2 Fleet requests instances from the lowest priced Spot Instance pool that
|
||||
// has available capacity. If the lowest priced pool doesn't have available
|
||||
// capacity, the Spot Instances come from the next lowest priced pool that has
|
||||
// available capacity. If a pool runs out of capacity before fulfilling your
|
||||
// desired capacity, EC2 Fleet will continue to fulfill your request by drawing
|
||||
// from the next lowest priced pool. To ensure that your desired capacity is
|
||||
// met, you might receive Spot Instances from several pools. Because this strategy
|
||||
// only considers instance price and not capacity availability, it might lead
|
||||
// to high interruption rates.
|
||||
//
|
||||
// Default: lowest-price
|
||||
AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"`
|
||||
|
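For the EC2 Fleet variant documented above, the same strategy choice is passed through SpotOptionsRequest on CreateFleet. A rough sketch follows; only AllocationStrategy and the SpotAllocationStrategy enum come from this diff, and the remaining fields (Type, TargetCapacitySpecification) are assumptions about the wider CreateFleet API.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// newFleetInput is an illustrative helper, not part of the SDK.
func newFleetInput() *ec2.CreateFleetInput {
	return &ec2.CreateFleetInput{
		Type: aws.String(ec2.FleetTypeRequest), // assumed enum constant
		SpotOptions: &ec2.SpotOptionsRequest{
			// New enum value added in this SDK update.
			AllocationStrategy: aws.String(ec2.SpotAllocationStrategyPriceCapacityOptimized),
		},
		TargetCapacitySpecification: &ec2.TargetCapacitySpecificationRequest{ // assumed type
			TotalTargetCapacity:       aws.Int64(2),
			DefaultTargetCapacityType: aws.String(ec2.DefaultTargetCapacityTypeSpot),
		},
	}
}

func main() {
	fmt.Println(newFleetInput()) // pass to CreateFleet on a configured *ec2.EC2 client
}
```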
|
@@ -166857,6 +167309,9 @@ const (

// AllocationStrategyCapacityOptimizedPrioritized is a AllocationStrategy enum value
AllocationStrategyCapacityOptimizedPrioritized = "capacityOptimizedPrioritized"

// AllocationStrategyPriceCapacityOptimized is a AllocationStrategy enum value
AllocationStrategyPriceCapacityOptimized = "priceCapacityOptimized"
)

// AllocationStrategy_Values returns all elements of the AllocationStrategy enum

@@ -166866,6 +167321,7 @@ func AllocationStrategy_Values() []string {
AllocationStrategyDiversified,
AllocationStrategyCapacityOptimized,
AllocationStrategyCapacityOptimizedPrioritized,
AllocationStrategyPriceCapacityOptimized,
}
}
|
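The generated *_Values helpers are handy for validating user-supplied strings against the enum, for example in a config loader. A small illustrative helper (the function name is made up for this example; only AllocationStrategy_Values and the enum constants are from the SDK):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
)

// isValidAllocationStrategy reports whether s is one of the AllocationStrategy
// enum values known to the SDK, including the new priceCapacityOptimized.
func isValidAllocationStrategy(s string) bool {
	for _, v := range ec2.AllocationStrategy_Values() {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidAllocationStrategy("priceCapacityOptimized")) // true with this SDK version
	fmt.Println(isValidAllocationStrategy("cheapest"))               // false
}
```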
@ -166945,6 +167401,9 @@ const (
|
|||
|
||||
// ArchitectureTypeX8664Mac is a ArchitectureType enum value
|
||||
ArchitectureTypeX8664Mac = "x86_64_mac"
|
||||
|
||||
// ArchitectureTypeArm64Mac is a ArchitectureType enum value
|
||||
ArchitectureTypeArm64Mac = "arm64_mac"
|
||||
)
|
||||
|
||||
// ArchitectureType_Values returns all elements of the ArchitectureType enum
|
||||
|
|
@ -166954,6 +167413,7 @@ func ArchitectureType_Values() []string {
|
|||
ArchitectureTypeX8664,
|
||||
ArchitectureTypeArm64,
|
||||
ArchitectureTypeX8664Mac,
|
||||
ArchitectureTypeArm64Mac,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -166969,6 +167429,9 @@ const (
|
|||
|
||||
// ArchitectureValuesX8664Mac is a ArchitectureValues enum value
|
||||
ArchitectureValuesX8664Mac = "x86_64_mac"
|
||||
|
||||
// ArchitectureValuesArm64Mac is a ArchitectureValues enum value
|
||||
ArchitectureValuesArm64Mac = "arm64_mac"
|
||||
)
|
||||
|
||||
// ArchitectureValues_Values returns all elements of the ArchitectureValues enum
|
||||
|
|
@ -166978,6 +167441,7 @@ func ArchitectureValues_Values() []string {
|
|||
ArchitectureValuesX8664,
|
||||
ArchitectureValuesArm64,
|
||||
ArchitectureValuesX8664Mac,
|
||||
ArchitectureValuesArm64Mac,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -171048,6 +171512,18 @@ const (
|
|||
|
||||
// InstanceTypeU3tb156xlarge is a InstanceType enum value
|
||||
InstanceTypeU3tb156xlarge = "u-3tb1.56xlarge"
|
||||
|
||||
// InstanceTypeU18tb1112xlarge is a InstanceType enum value
|
||||
InstanceTypeU18tb1112xlarge = "u-18tb1.112xlarge"
|
||||
|
||||
// InstanceTypeU24tb1112xlarge is a InstanceType enum value
|
||||
InstanceTypeU24tb1112xlarge = "u-24tb1.112xlarge"
|
||||
|
||||
// InstanceTypeTrn12xlarge is a InstanceType enum value
|
||||
InstanceTypeTrn12xlarge = "trn1.2xlarge"
|
||||
|
||||
// InstanceTypeTrn132xlarge is a InstanceType enum value
|
||||
InstanceTypeTrn132xlarge = "trn1.32xlarge"
|
||||
)
|
||||
|
||||
// InstanceType_Values returns all elements of the InstanceType enum
|
||||
|
|
@ -171622,6 +172098,10 @@ func InstanceType_Values() []string {
|
|||
InstanceTypeR6aMetal,
|
||||
InstanceTypeP4de24xlarge,
|
||||
InstanceTypeU3tb156xlarge,
|
||||
InstanceTypeU18tb1112xlarge,
|
||||
InstanceTypeU24tb1112xlarge,
|
||||
InstanceTypeTrn12xlarge,
|
||||
InstanceTypeTrn132xlarge,
|
||||
}
|
||||
}
|
||||
|
||||
|
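The new InstanceType constants can be used anywhere the API expects an instance type string, for example to check whether a region actually offers the newly added Trainium sizes. A sketch only; DescribeInstanceTypes and its field names are standard EC2 API surface assumed here, not something introduced by this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	// Look up the newly added Trainium instance types.
	out, err := svc.DescribeInstanceTypes(&ec2.DescribeInstanceTypesInput{
		InstanceTypes: []*string{
			aws.String(ec2.InstanceTypeTrn12xlarge),
			aws.String(ec2.InstanceTypeTrn132xlarge),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, it := range out.InstanceTypes {
		fmt.Println(aws.StringValue(it.InstanceType))
	}
}
```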
|
@ -173905,6 +174385,9 @@ const (
|
|||
|
||||
// SpotAllocationStrategyCapacityOptimizedPrioritized is a SpotAllocationStrategy enum value
|
||||
SpotAllocationStrategyCapacityOptimizedPrioritized = "capacity-optimized-prioritized"
|
||||
|
||||
// SpotAllocationStrategyPriceCapacityOptimized is a SpotAllocationStrategy enum value
|
||||
SpotAllocationStrategyPriceCapacityOptimized = "price-capacity-optimized"
|
||||
)
|
||||
|
||||
// SpotAllocationStrategy_Values returns all elements of the SpotAllocationStrategy enum
|
||||
|
|
@ -173914,6 +174397,7 @@ func SpotAllocationStrategy_Values() []string {
|
|||
SpotAllocationStrategyDiversified,
|
||||
SpotAllocationStrategyCapacityOptimized,
|
||||
SpotAllocationStrategyCapacityOptimizedPrioritized,
|
||||
SpotAllocationStrategyPriceCapacityOptimized,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -224,6 +224,10 @@ type EC2API interface {
CancelExportTaskWithContext(aws.Context, *ec2.CancelExportTaskInput, ...request.Option) (*ec2.CancelExportTaskOutput, error)
CancelExportTaskRequest(*ec2.CancelExportTaskInput) (*request.Request, *ec2.CancelExportTaskOutput)

CancelImageLaunchPermission(*ec2.CancelImageLaunchPermissionInput) (*ec2.CancelImageLaunchPermissionOutput, error)
CancelImageLaunchPermissionWithContext(aws.Context, *ec2.CancelImageLaunchPermissionInput, ...request.Option) (*ec2.CancelImageLaunchPermissionOutput, error)
CancelImageLaunchPermissionRequest(*ec2.CancelImageLaunchPermissionInput) (*request.Request, *ec2.CancelImageLaunchPermissionOutput)

CancelImportTask(*ec2.CancelImportTaskInput) (*ec2.CancelImportTaskOutput, error)
CancelImportTaskWithContext(aws.Context, *ec2.CancelImportTaskInput, ...request.Option) (*ec2.CancelImportTaskOutput, error)
CancelImportTaskRequest(*ec2.CancelImportTaskInput) (*request.Request, *ec2.CancelImportTaskOutput)
|
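Because these operations are part of the generated EC2API interface (presumably the ec2iface package, given the method set), callers can depend on the interface and be handed either a real client or a test fake. A hedged sketch; the ImageId field on CancelImageLaunchPermissionInput is an assumption about the API shape, not shown in this hunk.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

// cancelLaunchPermission accepts the interface, so tests can pass a fake
// implementation instead of a real EC2 client.
func cancelLaunchPermission(ctx context.Context, api ec2iface.EC2API, imageID string) error {
	_, err := api.CancelImageLaunchPermissionWithContext(ctx, &ec2.CancelImageLaunchPermissionInput{
		ImageId: aws.String(imageID), // assumed field name
	})
	return err
}

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	if err := cancelLaunchPermission(context.Background(), svc, "ami-0123456789abcdef0"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("launch permission cancelled")
}
```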
@ -1400,6 +1400,10 @@ func (c *Route53) CreateQueryLoggingConfigRequest(input *CreateQueryLoggingConfi
|
|||
// it can’t be used with the log group associated with query log. Update
|
||||
// or provide a resource policy to grant permissions for the KMS key.
|
||||
//
|
||||
// - The Key management service (KMS) key you specified is marked as disabled
|
||||
// for the log group associated with query log. Update or provide a resource
|
||||
// policy to grant permissions for the KMS key.
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/route53-2013-04-01/CreateQueryLoggingConfig
|
||||
func (c *Route53) CreateQueryLoggingConfig(input *CreateQueryLoggingConfigInput) (*CreateQueryLoggingConfigOutput, error) {
|
||||
req, out := c.CreateQueryLoggingConfigRequest(input)
|
||||
|
|
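The error cases listed above surface when calling CreateQueryLoggingConfig. Below is a minimal sketch of the call, assuming the usual input fields; HostedZoneId and CloudWatchLogsLogGroupArn are standard for this API but are not shown in this diff, and both values are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.Must(session.NewSession()))
	out, err := svc.CreateQueryLoggingConfig(&route53.CreateQueryLoggingConfigInput{
		HostedZoneId:              aws.String("Z0000000EXAMPLE"),                                                // placeholder
		CloudWatchLogsLogGroupArn: aws.String("arn:aws:logs:us-east-1:123456789012:log-group:/route53/example"), // placeholder
	})
	if err != nil {
		// InsufficientCloudWatchLogsResourcePolicy covers the KMS and
		// resource-policy cases described in the doc comment above.
		log.Fatal(err)
	}
	fmt.Println(out)
}
```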
@ -18312,9 +18316,6 @@ type ResourceRecordSet struct {
|
|||
// an ELB load balancer, and is referred to by an IP address or a DNS domain
|
||||
// name, depending on the record type.
|
||||
//
|
||||
// Although creating latency and latency alias resource record sets in a private
|
||||
// hosted zone is allowed, it's not supported.
|
||||
//
|
||||
// When Amazon Route 53 receives a DNS query for a domain name and type for
|
||||
// which you have created latency resource record sets, Route 53 selects the
|
||||
// latency resource record set that has the lowest latency between the end user
|
||||
|
|
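A latency record like the ones described above pairs a Region value with a SetIdentifier; the eu-central-2 constant added further down in this diff can be used directly. A sketch only: the field names other than Region follow the standard Route 53 API and the record values are placeholders, none of which come from this change.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
)

// latencyRecord builds a latency-based record for the new eu-central-2 region.
func latencyRecord() *route53.ResourceRecordSet {
	return &route53.ResourceRecordSet{
		Name:            aws.String("api.example.com."),
		Type:            aws.String(route53.RRTypeA),
		SetIdentifier:   aws.String("eu-central-2"),
		Region:          aws.String(route53.ResourceRecordSetRegionEuCentral2),
		TTL:             aws.Int64(60),
		ResourceRecords: []*route53.ResourceRecord{{Value: aws.String("203.0.113.10")}},
	}
}

func main() {
	fmt.Println(latencyRecord()) // pass via ChangeResourceRecordSets on a configured client
}
```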
@ -20343,6 +20344,9 @@ const (
|
|||
// CloudWatchRegionEuCentral1 is a CloudWatchRegion enum value
|
||||
CloudWatchRegionEuCentral1 = "eu-central-1"
|
||||
|
||||
// CloudWatchRegionEuCentral2 is a CloudWatchRegion enum value
|
||||
CloudWatchRegionEuCentral2 = "eu-central-2"
|
||||
|
||||
// CloudWatchRegionEuWest1 is a CloudWatchRegion enum value
|
||||
CloudWatchRegionEuWest1 = "eu-west-1"
|
||||
|
||||
|
|
@ -20425,6 +20429,7 @@ func CloudWatchRegion_Values() []string {
|
|||
CloudWatchRegionUsWest2,
|
||||
CloudWatchRegionCaCentral1,
|
||||
CloudWatchRegionEuCentral1,
|
||||
CloudWatchRegionEuCentral2,
|
||||
CloudWatchRegionEuWest1,
|
||||
CloudWatchRegionEuWest2,
|
||||
CloudWatchRegionEuWest3,
|
||||
|
|
@ -20720,6 +20725,9 @@ const (
|
|||
// ResourceRecordSetRegionEuCentral1 is a ResourceRecordSetRegion enum value
|
||||
ResourceRecordSetRegionEuCentral1 = "eu-central-1"
|
||||
|
||||
// ResourceRecordSetRegionEuCentral2 is a ResourceRecordSetRegion enum value
|
||||
ResourceRecordSetRegionEuCentral2 = "eu-central-2"
|
||||
|
||||
// ResourceRecordSetRegionApSoutheast1 is a ResourceRecordSetRegion enum value
|
||||
ResourceRecordSetRegionApSoutheast1 = "ap-southeast-1"
|
||||
|
||||
|
|
@ -20756,6 +20764,9 @@ const (
|
|||
// ResourceRecordSetRegionMeSouth1 is a ResourceRecordSetRegion enum value
|
||||
ResourceRecordSetRegionMeSouth1 = "me-south-1"
|
||||
|
||||
// ResourceRecordSetRegionMeCentral1 is a ResourceRecordSetRegion enum value
|
||||
ResourceRecordSetRegionMeCentral1 = "me-central-1"
|
||||
|
||||
// ResourceRecordSetRegionApSouth1 is a ResourceRecordSetRegion enum value
|
||||
ResourceRecordSetRegionApSouth1 = "ap-south-1"
|
||||
|
||||
|
|
@ -20778,6 +20789,7 @@ func ResourceRecordSetRegion_Values() []string {
|
|||
ResourceRecordSetRegionEuWest2,
|
||||
ResourceRecordSetRegionEuWest3,
|
||||
ResourceRecordSetRegionEuCentral1,
|
||||
ResourceRecordSetRegionEuCentral2,
|
||||
ResourceRecordSetRegionApSoutheast1,
|
||||
ResourceRecordSetRegionApSoutheast2,
|
||||
ResourceRecordSetRegionApSoutheast3,
|
||||
|
|
@ -20790,6 +20802,7 @@ func ResourceRecordSetRegion_Values() []string {
|
|||
ResourceRecordSetRegionCnNorthwest1,
|
||||
ResourceRecordSetRegionApEast1,
|
||||
ResourceRecordSetRegionMeSouth1,
|
||||
ResourceRecordSetRegionMeCentral1,
|
||||
ResourceRecordSetRegionApSouth1,
|
||||
ResourceRecordSetRegionAfSouth1,
|
||||
ResourceRecordSetRegionEuSouth1,
|
||||
|
|
@ -20877,6 +20890,9 @@ const (
|
|||
// VPCRegionEuCentral1 is a VPCRegion enum value
|
||||
VPCRegionEuCentral1 = "eu-central-1"
|
||||
|
||||
// VPCRegionEuCentral2 is a VPCRegion enum value
|
||||
VPCRegionEuCentral2 = "eu-central-2"
|
||||
|
||||
// VPCRegionApEast1 is a VPCRegion enum value
|
||||
VPCRegionApEast1 = "ap-east-1"
|
||||
|
||||
|
|
@ -20952,6 +20968,7 @@ func VPCRegion_Values() []string {
|
|||
VPCRegionEuWest2,
|
||||
VPCRegionEuWest3,
|
||||
VPCRegionEuCentral1,
|
||||
VPCRegionEuCentral2,
|
||||
VPCRegionApEast1,
|
||||
VPCRegionMeSouth1,
|
||||
VPCRegionUsGovWest1,
|
||||
|
|
|
|||
|
|
@@ -6,6 +6,18 @@

// Amazon Route 53 is a highly available and scalable Domain Name System (DNS)
// web service.
//
// You can use Route 53 to:
//
// - Register domain names. For more information, see How domain registration
// works (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-domain-registration.html).
//
// - Route internet traffic to the resources for your domain. For more information,
// see How internet traffic is routed to your website or web application
// (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-dns-service.html).
//
// - Check the health of your resources. For more information, see How Route
// 53 checks the health of your resources (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/welcome-health-checks.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/route53-2013-04-01 for more information on this service.
//
// See route53 package documentation for more information.
|
|
|
|||
|
|
@ -192,6 +192,10 @@ const (
|
|||
// * The Key management service (KMS) key you specified doesn’t exist or
|
||||
// it can’t be used with the log group associated with query log. Update
|
||||
// or provide a resource policy to grant permissions for the KMS key.
|
||||
//
|
||||
// * The Key management service (KMS) key you specified is marked as disabled
|
||||
// for the log group associated with query log. Update or provide a resource
|
||||
// policy to grant permissions for the KMS key.
|
||||
ErrCodeInsufficientCloudWatchLogsResourcePolicy = "InsufficientCloudWatchLogsResourcePolicy"
|
||||
|
||||
// ErrCodeInvalidArgument for service response error code
|
||||
|
|
|
|||
|
|
@ -54061,6 +54061,9 @@ type StepExecution struct {
|
|||
// The timeout seconds of the step.
|
||||
TimeoutSeconds *int64 `type:"long"`
|
||||
|
||||
// The CloudWatch alarms that were invoked by the automation.
|
||||
TriggeredAlarms []*AlarmStateInformation `min:"1" type:"list"`
|
||||
|
||||
// Strategies used when step fails, we support Continue and Abort. Abort will
|
||||
// fail the automation when the step fails. Continue will ignore the failure
|
||||
// of current step and allow automation to run the next step. With conditional
|
||||
|
|
@ -54213,6 +54216,12 @@ func (s *StepExecution) SetTimeoutSeconds(v int64) *StepExecution {
|
|||
return s
|
||||
}
|
||||
|
||||
// SetTriggeredAlarms sets the TriggeredAlarms field's value.
|
||||
func (s *StepExecution) SetTriggeredAlarms(v []*AlarmStateInformation) *StepExecution {
|
||||
s.TriggeredAlarms = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetValidNextSteps sets the ValidNextSteps field's value.
|
||||
func (s *StepExecution) SetValidNextSteps(v []*string) *StepExecution {
|
||||
s.ValidNextSteps = v
|
||||
|
|
@ -54690,6 +54699,10 @@ type TargetLocation struct {
|
|||
// The Amazon Web Services Regions targeted by the current Automation execution.
|
||||
Regions []*string `min:"1" type:"list"`
|
||||
|
||||
// The details for the CloudWatch alarm you want to apply to an automation or
|
||||
// command.
|
||||
TargetLocationAlarmConfiguration *AlarmConfiguration `type:"structure"`
|
||||
|
||||
// The maximum number of Amazon Web Services Regions and Amazon Web Services
|
||||
// accounts allowed to run the Automation concurrently.
|
||||
TargetLocationMaxConcurrency *string `min:"1" type:"string"`
|
||||
|
|
@ -54735,6 +54748,11 @@ func (s *TargetLocation) Validate() error {
|
|||
if s.TargetLocationMaxErrors != nil && len(*s.TargetLocationMaxErrors) < 1 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("TargetLocationMaxErrors", 1))
|
||||
}
|
||||
if s.TargetLocationAlarmConfiguration != nil {
|
||||
if err := s.TargetLocationAlarmConfiguration.Validate(); err != nil {
|
||||
invalidParams.AddNested("TargetLocationAlarmConfiguration", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
|
|
@ -54760,6 +54778,12 @@ func (s *TargetLocation) SetRegions(v []*string) *TargetLocation {
|
|||
return s
|
||||
}
|
||||
|
||||
// SetTargetLocationAlarmConfiguration sets the TargetLocationAlarmConfiguration field's value.
|
||||
func (s *TargetLocation) SetTargetLocationAlarmConfiguration(v *AlarmConfiguration) *TargetLocation {
|
||||
s.TargetLocationAlarmConfiguration = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetTargetLocationMaxConcurrency sets the TargetLocationMaxConcurrency field's value.
|
||||
func (s *TargetLocation) SetTargetLocationMaxConcurrency(v string) *TargetLocation {
|
||||
s.TargetLocationMaxConcurrency = &v
|
||||
|
|
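The new TargetLocationAlarmConfiguration setter slots into the usual chained-setter style of the generated SSM types. A sketch; the AlarmConfiguration and Alarm field names are assumptions about the SSM API and are not shown in this diff, and the alarm name is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	// Chain the generated setters, including the newly added
	// SetTargetLocationAlarmConfiguration.
	loc := (&ssm.TargetLocation{}).
		SetRegions([]*string{aws.String("eu-central-1")}).
		SetTargetLocationMaxConcurrency("10").
		SetTargetLocationAlarmConfiguration(&ssm.AlarmConfiguration{ // field names assumed
			Alarms: []*ssm.Alarm{{Name: aws.String("automation-error-rate")}}, // placeholder alarm
		})
	fmt.Println(loc)
}
```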
@ -1,6 +1,6 @@
|
|||
# Setup a Global .gitignore for OS and editor generated files:
|
||||
# https://help.github.com/articles/ignoring-files
|
||||
# git config --global core.excludesfile ~/.gitignore_global
|
||||
# go test -c output
|
||||
*.test
|
||||
*.test.exe
|
||||
|
||||
.vagrant
|
||||
*.sublime-project
|
||||
# Output of go build ./cmd/fsnotify
|
||||
/fsnotify
|
||||
|
|
|
|||
|
|
@ -1,62 +0,0 @@
|
|||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Aaron L <aaron@bettercoder.net>
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Alexey Kazakov <alkazako@redhat.com>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Anmol Sethi <me@anmol.io>
|
||||
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||
Brian Goff <cpuguy83@gmail.com>
|
||||
Bruno Bigras <bigras.bruno@gmail.com>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <howeyc@gmail.com>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Daniel Wagner-Hall <dawagner@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Eric Lin <linxiulei@gmail.com>
|
||||
Evan Phoenix <evan@fallingsnow.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Gautam Dey <gautam.dey77@gmail.com>
|
||||
Hari haran <hariharan.uno@gmail.com>
|
||||
Ichinose Shogo <shogo82148@gmail.com>
|
||||
Johannes Ebke <johannes@ebke.org>
|
||||
John C Barstow <jbowtie@amathaine.com>
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
|
||||
Matt Layher <mdlayher@gmail.com>
|
||||
Matthias Stone <matthias@bellstone.ca>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Nickolai Zeldovich <nickolai@csail.mit.edu>
|
||||
Oliver Bristow <evilumbrella+github@gmail.com>
|
||||
Patrick <patrick@dropbox.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pawel Knap <pawelknap88@gmail.com>
|
||||
Pieter Droogendijk <pieter@binky.org.uk>
|
||||
Pratik Shinde <pratikshinde320@gmail.com>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Riku Voipio <riku.voipio@linaro.org>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Rodrigo Chiossi <rodrigochiossi@gmail.com>
|
||||
Slawek Ligus <root@ooz.ie>
|
||||
Soge Zhang <zhssoge@gmail.com>
|
||||
Tiffany Jernigan <tiffany.jernigan@intel.com>
|
||||
Tilak Sharma <tilaks@google.com>
|
||||
Tobias Klauser <tobias.klauser@gmail.com>
|
||||
Tom Payne <twpayne@gmail.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
Vahe Khachikyan <vahe@live.ca>
|
||||
Yukang <moorekang@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
||||
铁哥 <guotie.9@gmail.com>
|
||||
|
|
@ -7,6 +7,95 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
## [Unreleased]
|
||||
|
||||
Nothing yet.
|
||||
|
||||
## [1.6.0] - 2022-10-13
|
||||
|
||||
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
|
||||
but not documented). It also increases the minimum Linux version to 2.6.32.
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `Event.Has()` and `Op.Has()` ([#477])
|
||||
|
||||
This makes checking events a lot easier; for example:
|
||||
|
||||
if event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
}
|
||||
|
||||
Becomes:
|
||||
|
||||
if event.Has(Write) && !event.Has(Remove) {
|
||||
}
|
||||
|
||||
- all: add cmd/fsnotify ([#463])
|
||||
|
||||
A command-line utility for testing and some examples.
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- inotify: don't ignore events for files that don't exist ([#260], [#470])
|
||||
|
||||
Previously the inotify watcher would call `os.Lstat()` to check if a file
|
||||
still exists before emitting events.
|
||||
|
||||
This was inconsistent with other platforms and resulted in inconsistent event
|
||||
reporting (e.g. when a file is quickly removed and re-created), and generally
|
||||
a source of confusion. It was added in 2013 to fix a memory leak that no
|
||||
longer exists.
|
||||
|
||||
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
|
||||
not watched ([#460])
|
||||
|
||||
- inotify: replace epoll() with non-blocking inotify ([#434])
|
||||
|
||||
Non-blocking inotify was not generally available at the time this library was
|
||||
written in 2014, but now it is. As a result, the minimum Linux version is
|
||||
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
|
||||
|
||||
- kqueue: don't check for events every 100ms ([#480])
|
||||
|
||||
The watcher would wake up every 100ms, even when there was nothing to do. Now
|
||||
it waits until there is something to do.
|
||||
|
||||
- macos: retry opening files on EINTR ([#475])
|
||||
|
||||
- kqueue: skip unreadable files ([#479])
|
||||
|
||||
kqueue requires a file descriptor for every file in a directory; this would
|
||||
fail if a file was unreadable by the current user. Now these files are simply
|
||||
skipped.
|
||||
|
||||
- windows: fix renaming a watched directory if the parent is also watched ([#370])
|
||||
|
||||
- windows: increase buffer size from 4K to 64K ([#485])
|
||||
|
||||
- windows: close file handle on Remove() ([#288])
|
||||
|
||||
- kqueue: put pathname in the error if watching a file fails ([#471])
|
||||
|
||||
- inotify, windows: calling Close() more than once could race ([#465])
|
||||
|
||||
- kqueue: improve Close() performance ([#233])
|
||||
|
||||
- all: various documentation additions and clarifications.
|
||||
|
||||
[#233]: https://github.com/fsnotify/fsnotify/pull/233
|
||||
[#260]: https://github.com/fsnotify/fsnotify/pull/260
|
||||
[#288]: https://github.com/fsnotify/fsnotify/pull/288
|
||||
[#370]: https://github.com/fsnotify/fsnotify/pull/370
|
||||
[#434]: https://github.com/fsnotify/fsnotify/pull/434
|
||||
[#460]: https://github.com/fsnotify/fsnotify/pull/460
|
||||
[#463]: https://github.com/fsnotify/fsnotify/pull/463
|
||||
[#465]: https://github.com/fsnotify/fsnotify/pull/465
|
||||
[#470]: https://github.com/fsnotify/fsnotify/pull/470
|
||||
[#471]: https://github.com/fsnotify/fsnotify/pull/471
|
||||
[#475]: https://github.com/fsnotify/fsnotify/pull/475
|
||||
[#477]: https://github.com/fsnotify/fsnotify/pull/477
|
||||
[#479]: https://github.com/fsnotify/fsnotify/pull/479
|
||||
[#480]: https://github.com/fsnotify/fsnotify/pull/480
|
||||
[#485]: https://github.com/fsnotify/fsnotify/pull/485
|
||||
|
||||
## [1.5.4] - 2022-04-25
|
||||
|
||||
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
|
||||
|
|
@ -40,6 +129,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
[#385](https://github.com/fsnotify/fsnotify/pull/385)
|
||||
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
|
||||
|
||||
## [1.4.9] - 2020-03-11
|
||||
|
||||
* Move example usage to the readme #329. This may resolve #328.
|
||||
|
||||
## [1.4.8] - 2020-03-10
|
||||
|
||||
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
|
||||
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
|
||||
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
|
||||
* CI: Less verbosity (@nathany #267)
|
||||
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
|
||||
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
|
||||
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
|
||||
* CI: Add windows to travis matrix (@cpuguy83 #284)
|
||||
* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
|
||||
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
|
||||
* Linux: open files with close-on-exec (@linxiulei #273)
|
||||
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
|
||||
* Project: Add go.mod (@nathany #309)
|
||||
* Project: Revise editor config (@nathany #309)
|
||||
* Project: Update copyright for 2019 (@nathany #309)
|
||||
* CI: Drop go1.8 from CI matrix (@nathany #309)
|
||||
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
|
||||
|
||||
## [1.4.7] - 2018-01-09
|
||||
|
||||
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
||||
|
|
|
|||
|
|
@ -1,60 +1,26 @@
|
|||
# Contributing
|
||||
Thank you for your interest in contributing to fsnotify! We try to review and
|
||||
merge PRs in a reasonable timeframe, but please be aware that:
|
||||
|
||||
## Issues
|
||||
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
  can just send PRs, but they may end up being rejected for one reason or the
  other.
|
||||
|
||||
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
|
||||
* Please indicate the platform you are using fsnotify on.
|
||||
* A code example to reproduce the problem is appreciated.
|
||||
- fsnotify is a cross-platform library, and changes must work reasonably well on
|
||||
all supported platforms.
|
||||
|
||||
## Pull Requests
|
||||
- Changes will need to be compatible; old code should still compile, and the
|
||||
runtime behaviour can't change in ways that are likely to lead to problems for
|
||||
users.
|
||||
|
||||
### Contributor License Agreement
|
||||
Testing
|
||||
-------
|
||||
Just `go test ./...` runs all the tests; the CI runs this on all supported
|
||||
platforms. Testing different platforms locally can be done with something like
|
||||
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
|
||||
|
||||
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||
Use the `-short` flag to make the "stress test" run faster.
|
||||
|
||||
Please indicate that you have signed the CLA in your pull request.
|
||||
|
||||
### How fsnotify is Developed
|
||||
|
||||
* Development is done on feature branches.
|
||||
* Tests are run on BSD, Linux, macOS and Windows.
|
||||
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||
* To issue a new release, the maintainers will:
|
||||
* Update the CHANGELOG
|
||||
* Tag a version, which will become available through gopkg.in.
|
||||
|
||||
### How to Fork
|
||||
|
||||
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||
|
||||
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Ensure everything works and the tests pass (see below)
|
||||
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||
|
||||
Contribute upstream:
|
||||
|
||||
1. Fork fsnotify on GitHub
|
||||
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||
3. Push to the branch (`git push fork my-new-feature`)
|
||||
4. Create a new Pull Request on GitHub
|
||||
|
||||
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
|
||||
|
||||
### Testing
|
||||
|
||||
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
[goon]: https://github.com/arp242/goon
|
||||
[Vagrant]: https://www.vagrantup.com/
|
||||
[integration_test.go]: /integration_test.go
|
||||
|
|
|
|||
|
|
@ -1,28 +1,25 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
|
||||
Copyright © 2012 The Go Authors. All rights reserved.
|
||||
Copyright © fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
* Neither the name of Google Inc. nor the names of its contributors may be used
|
||||
to endorse or promote products derived from this software without specific
|
||||
prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
|
|||
|
|
@ -1,33 +1,33 @@
|
|||
# File system notifications for Go
|
||||
fsnotify is a Go library to provide cross-platform filesystem notifications on
|
||||
Windows, Linux, macOS, and BSD systems.
|
||||
|
||||
[](https://pkg.go.dev/github.com/fsnotify/fsnotify) [](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [](https://github.com/fsnotify/fsnotify/issues/413)
|
||||
Go 1.16 or newer is required; the full documentation is at
|
||||
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||
|
||||
fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library.
|
||||
**It's best to read the documentation at pkg.go.dev, as it's pinned to the last
|
||||
released version, whereas this README is for the last development version which
|
||||
may include additions/changes.**
|
||||
|
||||
Cross platform: Windows, Linux, BSD and macOS.
|
||||
---
|
||||
|
||||
Platform support:
|
||||
|
||||
| Adapter | OS | Status |
|
||||
| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| inotify | Linux 2.6.27 or later, Android\* | Supported |
|
||||
| kqueue | BSD, macOS, iOS\* | Supported |
|
||||
| --------------------- | ---------------| -------------------------------------------------------------|
|
||||
| inotify | Linux 2.6.32+ | Supported |
|
||||
| kqueue | BSD, macOS | Supported |
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) |
|
||||
| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) |
|
||||
| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| fanotify | Linux 5.9+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) |
|
||||
| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
|
||||
\* Android and iOS are untested.
|
||||
Linux and macOS should include Android and iOS, but these are currently untested.
|
||||
|
||||
Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
|
||||
|
||||
## API stability
|
||||
|
||||
fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
|
||||
|
||||
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/).
|
||||
|
||||
## Usage
|
||||
Usage
|
||||
-----
|
||||
A basic example:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
|
@ -39,13 +39,14 @@ import (
|
|||
)
|
||||
|
||||
func main() {
|
||||
// Create new watcher.
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
done := make(chan bool)
|
||||
// Start listening for events.
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
|
|
@ -54,7 +55,7 @@ func main() {
|
|||
return
|
||||
}
|
||||
log.Println("event:", event)
|
||||
if event.Op&fsnotify.Write == fsnotify.Write {
|
||||
if event.Has(fsnotify.Write) {
|
||||
log.Println("modified file:", event.Name)
|
||||
}
|
||||
case err, ok := <-watcher.Errors:
|
||||
|
|
@ -66,55 +67,95 @@ func main() {
|
|||
}
|
||||
}()
|
||||
|
||||
err = watcher.Add("/tmp/foo")
|
||||
// Add a path.
|
||||
err = watcher.Add("/tmp")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
<-done
|
||||
|
||||
// Block main goroutine forever.
|
||||
<-make(chan struct{})
|
||||
}
|
||||
```
|
||||
|
||||
## Contributing
|
||||
Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
|
||||
run with:
|
||||
|
||||
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||
% go run ./cmd/fsnotify
|
||||
|
||||
## FAQ
|
||||
FAQ
|
||||
---
|
||||
### Will a file still be watched when it's moved to another directory?
|
||||
No, not unless you are watching the location it was moved to.
|
||||
|
||||
**When a file is moved to another directory is it still being watched?**
|
||||
### Are subdirectories watched too?
|
||||
No, you must add watches for any directory you want to watch (a recursive
|
||||
watcher is on the roadmap: [#18]).
|
||||
|
||||
No (it shouldn't be, unless you are watching where it was moved to).
|
||||
|
||||
**When I watch a directory, are all subdirectories watched as well?**
|
||||
|
||||
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
|
||||
|
||||
**Do I have to watch the Error and Event channels in a separate goroutine?**
|
||||
|
||||
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
|
||||
|
||||
**Why am I receiving multiple events for the same file on OS X?**
|
||||
|
||||
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
|
||||
|
||||
**How many files can be watched at once?**
|
||||
|
||||
There are OS-specific limits as to how many watches can be created:
|
||||
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
|
||||
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
|
||||
|
||||
**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
|
||||
|
||||
fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
|
||||
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
||||
|
||||
### Do I have to watch the Error and Event channels in a goroutine?
|
||||
As of now, yes (you can read both channels in the same goroutine using `select`,
|
||||
you don't need a separate goroutine for both channels; see the example).
|
||||
|
||||
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from the underlying OS to work. The current NFS and SMB
protocols do not provide network-level support for file notifications, and
neither do the /proc and /sys virtual filesystems.

This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
|
||||
|
||||
[#9]: https://github.com/fsnotify/fsnotify/issues/9
|
||||
|
||||
Platform-specific notes
|
||||
-----------------------
|
||||
### Linux
|
||||
When a file is removed a REMOVE event won't be emitted until all file
|
||||
descriptors are closed; it will emit a CHMOD instead:
|
||||
|
||||
fp := os.Open("file")
|
||||
os.Remove("file") // CHMOD
|
||||
fp.Close() // REMOVE
|
||||
|
||||
This is the event that inotify sends, so not much can be changed about this.
|
||||
|
||||
The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
|
||||
the number of watches per user, and `fs.inotify.max_user_instances` specifies
|
||||
the maximum number of inotify instances per user. Every Watcher you create is an
|
||||
"instance", and every path you add is a "watch".
|
||||
|
||||
These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
|
||||
`/proc/sys/fs/inotify/max_user_instances`
|
||||
|
||||
To increase them you can use `sysctl` or write the value to proc file:
|
||||
|
||||
# The default values on Linux 5.18
|
||||
sysctl fs.inotify.max_user_watches=124983
|
||||
sysctl fs.inotify.max_user_instances=128
|
||||
|
||||
To make the changes persist on reboot edit `/etc/sysctl.conf` or
|
||||
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
|
||||
distro's documentation):
|
||||
|
||||
fs.inotify.max_user_watches=124983
|
||||
fs.inotify.max_user_instances=128
|
||||
|
||||
Reaching the limit will result in a "no space left on device" or "too many open
|
||||
files" error.
|
||||
|
||||
### kqueue (macOS, all BSD systems)
|
||||
kqueue requires opening a file descriptor for every file that's being watched;
|
||||
so if you're watching a directory with five files then that's six file
|
||||
descriptors. You will run in to your system's "max open files" limit faster on
|
||||
these platforms.
|
||||
|
||||
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
|
||||
control the maximum number of open files.
|
||||
|
||||
### macOS
|
||||
Spotlight indexing on macOS can result in multiple events (see [#15]). A temporary
|
||||
workaround is to add your folder(s) to the *Spotlight Privacy settings* until we
|
||||
have a native FSEvents implementation (see [#11]).
|
||||
|
||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
[#7]: https://github.com/howeyc/fsnotify/issues/7
|
||||
|
||||
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
|
||||
|
||||
## Related Projects
|
||||
|
||||
* [notify](https://github.com/rjeczalik/notify)
|
||||
* [fsevents](https://github.com/fsnotify/fsevents)
|
||||
|
||||
[#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
|
|
|
|||
|
|
@ -0,0 +1,162 @@
|
|||
//go:build solaris
|
||||
// +build solaris
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// and on kqueue when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file a temporary file will be written to first, and if successful the
// temporary file is moved to the destination, removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
return nil
}
|
||||
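The advice above (watch the parent directory and filter on Event.Name) looks roughly like this in practice. A sketch using only the API documented in this diff (NewWatcher, Add, Events, Errors, Event.Has); the watched path is a placeholder.

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the directory, not the file itself, so an atomic save-via-rename
	// doesn't silently drop the watch.
	target := "/tmp/app/config.yaml" // placeholder path
	if err := watcher.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			// Ignore events for other files in the directory.
			if event.Name != target {
				continue
			}
			if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) {
				log.Println("config changed:", event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```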
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -0,0 +1,459 @@
|
|||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent when a file is
// truncated. On Windows it's never sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
|
||||
// Store fd here as os.File.Read() will no longer return on close after
|
||||
// calling Fd(). See: https://github.com/golang/go/issues/26439
|
||||
fd int
|
||||
mu sync.Mutex // Map access
|
||||
inotifyFile *os.File
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
// Need to set the FD to nonblocking mode in order for SetDeadline methods to work
|
||||
// Otherwise, blocking i/o operations won't terminate on close
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
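The package comment explains that exhausting fs.inotify.max_user_watches or fs.inotify.max_user_instances surfaces as "no space left on device" or "too many open files". A small hedged sketch for reading the current limits from the standard /proc locations on Linux, useful when diagnosing such errors; it is not part of the vendored code:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readInotifyLimit reads one of the /proc/sys/fs/inotify limits, for example
// "max_user_watches" or "max_user_instances".
func readInotifyLimit(name string) (int, error) {
	data, err := os.ReadFile("/proc/sys/fs/inotify/" + name)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

func main() {
	watches, err := readInotifyLimit("max_user_watches")
	if err != nil {
		fmt.Println("could not read limit:", err)
		return
	}
	fmt.Println("fs.inotify.max_user_watches =", watches)
}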
|
||||
|
||||
// Returns true if the event was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendEvent(e Event) bool {
|
||||
select {
|
||||
case w.Events <- e:
|
||||
return true
|
||||
case <-w.done:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendError(err error) bool {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.done:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed() {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Causes any blocking reads to return with an error, provided the file
|
||||
// still supports deadline operations.
|
||||
err := w.inotifyFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to the destination, removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watchEntry := w.watches[name]
|
||||
if watchEntry != nil {
|
||||
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
if watchEntry == nil {
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
} else {
|
||||
watchEntry.wd = uint32(wd)
|
||||
watchEntry.flags = flags
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
|
||||
// We successfully removed the watch if InotifyRmWatch doesn't return an
// error; we need to clean up our internal state to ensure it matches
// inotify's kernel state.
delete(w.paths, int(watch.wd))
delete(w.watches, name)

// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify watch will already have been removed.
// The watches and paths maps are also cleaned up implicitly and
// asynchronously when readEvents() receives an IN_IGNORED event for this
// watch descriptor, so EINVAL here means the wd is already being removed
// (by another rm_watch() call or because its file was removed) and we have
// not yet received the IN_IGNORED event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case;
|
||||
// The only two possible errors are:
|
||||
//
|
||||
// - EBADF, which happens when w.fd is not a valid file descriptor
|
||||
// of any kind.
|
||||
// - EINVAL, which is when fd is not an inotify descriptor or wd
|
||||
// is not a valid watch descriptor. Watch descriptors are
|
||||
// invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they
|
||||
// are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
defer func() {
|
||||
close(w.doneResp)
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
errno error // Syscall errno
|
||||
)
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := w.inotifyFile.Read(buf[:])
|
||||
switch {
|
||||
case errors.Unwrap(err) == os.ErrClosed:
|
||||
return
|
||||
case err != nil:
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
// EOF was received, which should really never happen.
|
||||
err = io.EOF
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
var (
|
||||
// Point "raw" to the event in the buffer
|
||||
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
mask = uint32(raw.Mask)
|
||||
nameLen = uint32(raw.Len)
|
||||
)
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
if !w.sendError(ErrEventOverflow) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name, ok := w.paths[int(raw.Wd)]
|
||||
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||
// with the inotify kernel state which has already deleted the watch
|
||||
// automatically.
|
||||
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
delete(w.paths, int(raw.Wd))
|
||||
delete(w.watches, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := w.newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if mask&unix.IN_IGNORED == 0 {
|
||||
if !w.sendEvent(event) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on an inotify mask.
|
||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
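The Write documentation above refers to the dedup example in cmd/fsnotify: a single save can surface as a burst of Write events. One common way to coalesce them is a per-path timer that only fires once events go quiet for a caller-chosen window. A hedged sketch using the public fsnotify API; the watched path and the 100ms window are illustrative choices, not part of this change:

package main

import (
	"log"
	"sync"
	"time"

	"github.com/fsnotify/fsnotify"
)

// debounce calls fn once per burst of events for a path: the callback only
// runs after no new event for that path has arrived within the wait window.
func debounce(events <-chan fsnotify.Event, wait time.Duration, fn func(fsnotify.Event)) {
	var (
		mu     sync.Mutex
		latest = make(map[string]fsnotify.Event) // last event seen per path
		timers = make(map[string]*time.Timer)    // pending timer per path
	)
	for ev := range events {
		mu.Lock()
		latest[ev.Name] = ev
		if t, ok := timers[ev.Name]; ok {
			t.Reset(wait) // still in a burst; push the deadline out
		} else {
			name := ev.Name
			timers[name] = time.AfterFunc(wait, func() {
				mu.Lock()
				e, ok := latest[name]
				delete(timers, name)
				delete(latest, name)
				mu.Unlock()
				if ok {
					fn(e)
				}
			})
		}
		mu.Unlock()
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add("/tmp"); err != nil { // illustrative path
		log.Fatal(err)
	}
	debounce(w.Events, 100*time.Millisecond, func(e fsnotify.Event) {
		log.Println("settled:", e)
	})
}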
@@ -0,0 +1,707 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent when a file is
// truncated. On Windows it's never sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
|
||||
done chan struct{}
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
closepipe [2]int // Pipe used for closing.
|
||||
mu sync.Mutex // Protects access to watcher data
|
||||
watches map[string]int // Watched file descriptors (key: path).
|
||||
watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)).
|
||||
userWatches map[string]struct{} // Watches added with Watcher.Add()
|
||||
dirFlags map[string]uint32 // Watched directories to fflags used in kqueue.
|
||||
paths map[int]pathInfo // File descriptors to path names for processing kqueue events.
|
||||
fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events).
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
type pathInfo struct {
|
||||
name string
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
kq, closepipe, err := newKqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
kq: kq,
|
||||
closepipe: closepipe,
|
||||
watches: make(map[string]int),
|
||||
watchesByDir: make(map[string]map[int]struct{}),
|
||||
dirFlags: make(map[string]uint32),
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]struct{}),
|
||||
userWatches: make(map[string]struct{}),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// newKqueue creates a new kernel event queue and returns a descriptor.
|
||||
//
|
||||
// This registers a new event on closepipe, which will trigger an event when
|
||||
// it's closed. This way we can use kevent() without timeout/polling; without
|
||||
// the closepipe, it would block forever and we wouldn't be able to stop it at
|
||||
// all.
|
||||
func newKqueue() (kq int, closepipe [2]int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, closepipe, err
|
||||
}
|
||||
|
||||
// Register the close pipe.
|
||||
err = unix.Pipe(closepipe[:])
|
||||
if err != nil {
|
||||
unix.Close(kq)
|
||||
return kq, closepipe, err
|
||||
}
|
||||
|
||||
// Register changes to listen on the closepipe.
|
||||
changes := make([]unix.Kevent_t, 1)
|
||||
// SetKevent converts int to the platform-specific types.
|
||||
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
|
||||
unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
|
||||
|
||||
ok, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if ok == -1 {
|
||||
unix.Close(kq)
|
||||
unix.Close(closepipe[0])
|
||||
unix.Close(closepipe[1])
|
||||
return kq, closepipe, err
|
||||
}
|
||||
return kq, closepipe, nil
|
||||
}
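Since every kqueue watch keeps a file descriptor open (one per file in a watched directory, as the kqueue notes above explain), large trees can run into RLIMIT_NOFILE before anything else. A hedged sketch for inspecting the process limit via golang.org/x/sys/unix; raising it would need Setrlimit and suitable privileges and is left out:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var lim unix.Rlimit
	// RLIMIT_NOFILE is the per-process cap on open file descriptors, which
	// bounds how many files a kqueue-based watcher can track at once.
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		fmt.Println("Getrlimit:", err)
		return
	}
	fmt.Printf("open file limit: soft=%d hard=%d\n", lim.Cur, lim.Max)
}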
|
||||
|
||||
// Returns true if the event was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendEvent(e Event) bool {
|
||||
select {
|
||||
case w.Events <- e:
|
||||
return true
|
||||
case <-w.done:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendError(err error) bool {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.done:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// copy paths to remove while locked
|
||||
pathsToRemove := make([]string, 0, len(w.watches))
|
||||
for name := range w.watches {
|
||||
pathsToRemove = append(pathsToRemove, name)
|
||||
}
|
||||
w.mu.Unlock() // Unlock before calling Remove, which also locks
|
||||
for _, name := range pathsToRemove {
|
||||
w.Remove(name)
|
||||
}
|
||||
|
||||
// Send "quit" message to the reader goroutine.
|
||||
unix.Close(w.closepipe[1])
|
||||
close(w.done)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to the destination, removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
w.userWatches[name] = struct{}{}
|
||||
w.mu.Unlock()
|
||||
_, err := w.addWatch(name, noteAllEvents)
|
||||
return err
|
||||
}
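One usage note that applies across backends: Close() eventually makes the reader goroutine exit and close the Events and Errors channels, so a consumer can simply range until the channels close. A minimal hedged sketch; the watched path and the sleep are illustrative only:

package main

import (
	"log"
	"time"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	if err := w.Add("/tmp"); err != nil { // illustrative path; any existing directory works
		log.Fatal(err)
	}

	// Drain errors so a blocked error send can never stall the reader goroutine.
	go func() {
		for err := range w.Errors {
			log.Println("error:", err)
		}
	}()

	// Stop watching after a while; Close() lets the reader goroutine exit and
	// close the Events channel, which in turn ends the range loop below.
	go func() {
		time.Sleep(5 * time.Second)
		w.Close()
	}()

	for ev := range w.Events {
		log.Println("event:", ev)
	}
	log.Println("watcher closed")
}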
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
w.mu.Lock()
|
||||
watchfd, ok := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
|
||||
err := w.register([]int{watchfd}, unix.EV_DELETE, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(watchfd)
|
||||
|
||||
w.mu.Lock()
|
||||
isDir := w.paths[watchfd].isDir
|
||||
delete(w.watches, name)
|
||||
delete(w.userWatches, name)
|
||||
|
||||
parentName := filepath.Dir(name)
|
||||
delete(w.watchesByDir[parentName], watchfd)
|
||||
|
||||
if len(w.watchesByDir[parentName]) == 0 {
|
||||
delete(w.watchesByDir, parentName)
|
||||
}
|
||||
|
||||
delete(w.paths, watchfd)
|
||||
delete(w.dirFlags, name)
|
||||
delete(w.fileExists, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if isDir {
|
||||
var pathsToRemove []string
|
||||
w.mu.Lock()
|
||||
for fd := range w.watchesByDir[name] {
|
||||
path := w.paths[fd]
|
||||
if _, ok := w.userWatches[path.name]; !ok {
|
||||
pathsToRemove = append(pathsToRemove, path.name)
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.userWatches))
|
||||
for pathname := range w.userWatches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// addWatch adds name to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
var isDir bool
|
||||
// Make ./name and name equivalent
|
||||
name = filepath.Clean(name)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return "", errors.New("kevent instance already closed")
|
||||
}
|
||||
watchfd, alreadyWatching := w.watches[name]
|
||||
// We already have a watch, but we can still override flags.
|
||||
if alreadyWatching {
|
||||
isDir = w.paths[watchfd].isDir
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets or named pipes
|
||||
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
//
|
||||
// Linux can add unresolvable symlinks to the watch list without issue,
|
||||
// and Windows can't do symlinks period. To maintain consistency, we
|
||||
// will act like everything is fine if the link can't be resolved.
|
||||
// There will simply be no file events for broken symlinks. Hence the
|
||||
// returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
name, err = filepath.EvalSymlinks(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
_, alreadyWatching = w.watches[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
if alreadyWatching {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
// Retry on EINTR; open() can return EINTR in practice on macOS.
|
||||
// See #354, and go issues 11180 and 39237.
|
||||
for {
|
||||
watchfd, err = unix.Open(name, openMode, 0)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if errors.Is(err, unix.EINTR) {
|
||||
continue
|
||||
}
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
|
||||
if err != nil {
|
||||
unix.Close(watchfd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.mu.Lock()
|
||||
parentName := filepath.Dir(name)
|
||||
w.watches[name] = watchfd
|
||||
|
||||
watchesByDir, ok := w.watchesByDir[parentName]
|
||||
if !ok {
|
||||
watchesByDir = make(map[int]struct{}, 1)
|
||||
w.watchesByDir[parentName] = watchesByDir
|
||||
}
|
||||
watchesByDir[watchfd] = struct{}{}
|
||||
|
||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Watch the directory if it has not been watched before,
|
||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
w.mu.Lock()
|
||||
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
// Store flags so this watch can be updated later
|
||||
w.dirFlags[name] = flags
|
||||
w.mu.Unlock()
|
||||
|
||||
if watchDir {
|
||||
if err := w.watchDirectoryFiles(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
defer func() {
|
||||
err := unix.Close(w.kq)
|
||||
if err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
unix.Close(w.closepipe[0])
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
}()
|
||||
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
for closed := false; !closed; {
|
||||
kevents, err := w.read(eventBuffer)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
|
||||
closed = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Flush the events we received to the Events channel
|
||||
for _, kevent := range kevents {
|
||||
var (
|
||||
watchfd = int(kevent.Ident)
|
||||
mask = uint32(kevent.Fflags)
|
||||
)
|
||||
|
||||
// Shut down the loop when the pipe is closed, but only after all
|
||||
// other events have been processed.
|
||||
if watchfd == w.closepipe[0] {
|
||||
closed = true
|
||||
continue
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
path := w.paths[watchfd]
|
||||
w.mu.Unlock()
|
||||
|
||||
event := w.newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !event.Has(Remove) {
|
||||
// Double check to make sure the directory exists. This can
|
||||
// happen when we do a rm -fr on a recursively watched folder
|
||||
// and we receive a modification event first but the folder has
|
||||
// been deleted and later receive the delete event.
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Has(Rename) || event.Has(Remove) {
|
||||
w.Remove(event.Name)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if path.isDir && event.Has(Write) && !event.Has(Remove) {
|
||||
w.sendDirectoryChangeEvents(event.Name)
|
||||
} else {
|
||||
if !w.sendEvent(event) {
|
||||
closed = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if event.Has(Remove) {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); err == nil {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filePath := filepath.Clean(event.Name)
|
||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on kqueue Fflags.
|
||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
path := filepath.Join(dirPath, fileInfo.Name())
|
||||
|
||||
cleanPath, err := w.internalWatch(path, fileInfo)
|
||||
if err != nil {
|
||||
// No permission to read the file; that's not a problem: just skip.
|
||||
// But do add it to w.fileExists to prevent it from being picked up
|
||||
// as a "new" file later (it still shows up in the directory
|
||||
// listing).
|
||||
switch {
|
||||
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
|
||||
cleanPath = filepath.Clean(path)
|
||||
default:
|
||||
return fmt.Errorf("%q: %w", filepath.Join(dirPath, fileInfo.Name()), err)
|
||||
}
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[cleanPath] = struct{}{}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Search the directory for new files and send an event for them.
|
||||
//
|
||||
// This functionality is to have the BSD watcher match inotify, which sends
|
||||
// a create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dir string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
if !w.sendError(fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fi := range files {
|
||||
err := w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
if !doesExist {
|
||||
if !w.sendEvent(Event{Name: filePath, Op: Create}) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = struct{}{}
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||
return w.addWatch(name, flags)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// Register events with the queue.
|
||||
func (w *Watcher) register(fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types.
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// Register the events.
|
||||
success, err := unix.Kevent(w.kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(w.kq, nil, events, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
@@ -0,0 +1,66 @@
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows

package fsnotify

import (
"fmt"
"runtime"
)

// Watcher watches a set of files, delivering events to a channel.
type Watcher struct{}

// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
}

// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}

// Add starts monitoring the path for changes.
//
// A path can only be watched once; attempting to watch it more than once will
// return an error. Paths that do not yet exist on the filesystem cannot be
// added. A watch will be automatically removed if the path is deleted.
//
// A path will remain watched if it gets renamed to somewhere else on the same
// filesystem, but the monitor will get removed if the path gets deleted and
// re-created, or if it's moved to a different filesystem.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many tools update files atomically. Instead of "just" writing
// to the file, a temporary file will be written to first, and if successful the
// temporary file is moved to the destination, removing the original, or some
// variant thereof. The watcher on the original file is now lost, as it no
// longer exists.
//
// Instead, watch the parent directory and use Event.Name to filter out files
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
func (w *Watcher) Add(name string) error {
return nil
}

// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
func (w *Watcher) Remove(name string) error {
return nil
}
@@ -0,0 +1,746 @@
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
type Watcher struct {
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent when a file is
// truncated. On Windows it's never sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
|
||||
port windows.Handle // Handle to completion port
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
|
||||
mu sync.Mutex // Protects access to watches, isClosed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// NewWatcher creates a new Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
event := w.newEvent(name, uint32(mask))
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *Watcher) sendError(err error) bool {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.quit:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to the destination, removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for _, entry := range w.watches {
|
||||
for _, watchEntry := range entry {
|
||||
entries = append(entries, watchEntry.path)
|
||||
}
|
||||
}
|
||||
|
||||
return entries
|
||||
}
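As with the other backends, WatchList reports only the paths added through Add. A small hedged sketch of using it as a debugging aid, dumping the currently watched paths; the path is illustrative only:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

// dumpWatches logs everything the watcher is currently monitoring; handy when
// diagnosing why an expected event never arrived.
func dumpWatches(w *fsnotify.Watcher) {
	for _, p := range w.WatchList() {
		log.Println("watching:", p)
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	_ = w.Add(`C:\Temp`) // illustrative path; any existing directory works
	dumpWatches(w)
}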
|
||||
|
||||
// These options are from the old golang.org/x/exp/winfsnotify, where you could
|
||||
// add various options to the watch. This has long since been removed.
|
||||
//
|
||||
// The "sys" in the name is misleading as they're not part of any "system".
|
||||
//
|
||||
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
|
||||
const (
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSATTRIB = 0x4
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
sysFSIGNORED = 0x8000
|
||||
)
|
||||
|
||||
func (w *Watcher) newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle windows.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov windows.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [65536]byte // 64K buffer
|
||||
}
|
||||
|
||||
type (
|
||||
indexMap map[uint64]*watch
|
||||
watchMap map[uint32]indexMap
|
||||
)
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) getDir(pathname string) (dir string, err error) {
|
||||
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
|
||||
if err != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", err)
|
||||
}
|
||||
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *Watcher) getIno(path string) (ino *inode, err error) {
|
||||
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
|
||||
windows.FILE_LIST_DIRECTORY,
|
||||
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
|
||||
nil, windows.OPEN_EXISTING,
|
||||
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", err)
|
||||
}
|
||||
|
||||
var fi windows.ByHandleFileInformation
|
||||
err = windows.GetFileInformationByHandle(h, &fi)
|
||||
if err != nil {
|
||||
windows.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", err)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ino, err := w.getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
|
||||
if err != nil {
|
||||
windows.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
windows.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
|
||||
err = w.startRead(watchEntry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := w.getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
|
||||
err = windows.CloseHandle(ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
}
|
||||
if watch == nil {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
err := windows.CancelIo(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CancelIo", err))
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := w.toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= w.toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
err := windows.CloseHandle(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
if rdErr != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
||||
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n uint32
|
||||
key uintptr
|
||||
ov *windows.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
|
||||
// This error is handled after the watch == nil check below. NOTE: this
|
||||
// seems odd, not sure if it's correct.
|
||||
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
|
||||
err := windows.CloseHandle(w.port)
|
||||
if err != nil {
|
||||
err = os.NewSyscallError("CloseHandle", err)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch qErr {
|
||||
case windows.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case windows.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case windows.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.sendError(os.NewSyscallError("GetQueuedCompletionStatus", qErr))
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.sendError(errors.New("short read in readEvents()"))
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
|
||||
// Create a buf that is the size of the path name
|
||||
size := int(raw.FileNameLength / 2)
|
||||
var buf []uint16
|
||||
// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
|
||||
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
|
||||
sh.Len = size
|
||||
sh.Cap = size
|
||||
name := windows.UTF16ToString(buf)
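// Editorial note: the TODO above refers to unsafe.Slice (available since Go
// 1.17), which would replace the reflect.SliceHeader construction with a
// single call; a sketch of the equivalent code:
//
//	buf := unsafe.Slice(&raw.FileName, size)
//	name := windows.UTF16ToString(buf)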
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case windows.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
// Update saved path of all sub-watches.
|
||||
old := filepath.Join(watch.path, watch.rename)
|
||||
w.mu.Lock()
|
||||
for _, watchMap := range w.watches {
|
||||
for _, ww := range watchMap {
|
||||
if strings.HasPrefix(ww.path, old) {
|
||||
ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
w.sendEvent(fullname, watch.names[name]&mask)
|
||||
}
|
||||
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == windows.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.sendError(errors.New(
|
||||
"Windows system assumed buffer larger than it is, events have likely been missed."))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.sendError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (w *Watcher) toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case windows.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case windows.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
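// Editorial sketch (not part of the vendored source): the two helpers above
// translate between the internal inotify-style sysFS* masks and the Win32
// constants. For example:
//
//	var w Watcher
//	m := w.toWindowsFlags(sysFSCREATE | sysFSDELETE)
//	// m == windows.FILE_NOTIFY_CHANGE_FILE_NAME|windows.FILE_NOTIFY_CHANGE_DIR_NAME
//	op := w.toFSnotifyFlags(windows.FILE_ACTION_ADDED)
//	// op == sysFSCREATE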
|
||||
|
|
@ -1,38 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build solaris
|
||||
// +build solaris
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,29 +1,37 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||
// Package fsnotify provides a cross-platform interface for file system
|
||||
// notifications.
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Event represents a single file system notification.
|
||||
// Event represents a file system notification.
|
||||
type Event struct {
|
||||
Name string // Relative path to the file or directory.
|
||||
Op Op // File operation that triggered the event.
|
||||
// Path to the file or directory.
|
||||
//
|
||||
// Paths are relative to the input; for example with Add("dir") the Name
|
||||
// will be set to "dir/file" if you create that file, but if you use
|
||||
// Add("/path/to/dir") it will be "/path/to/dir/file".
|
||||
Name string
|
||||
|
||||
// File operation that triggered the event.
|
||||
//
|
||||
// This is a bitmask and some systems may send multiple operations at once.
|
||||
// Use the Event.Has() method instead of comparing with ==.
|
||||
Op Op
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// These are the generalized file operations that can trigger a notification.
|
||||
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
|
||||
// full description, and check them with [Event.Has].
|
||||
const (
|
||||
Create Op = 1 << iota
|
||||
Write
|
||||
|
|
@ -32,38 +40,42 @@ const (
|
|||
Chmod
|
||||
)
|
||||
|
||||
func (op Op) String() string {
|
||||
// Use a buffer for efficient string concatenation
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if op&Create == Create {
|
||||
buffer.WriteString("|CREATE")
|
||||
}
|
||||
if op&Remove == Remove {
|
||||
buffer.WriteString("|REMOVE")
|
||||
}
|
||||
if op&Write == Write {
|
||||
buffer.WriteString("|WRITE")
|
||||
}
|
||||
if op&Rename == Rename {
|
||||
buffer.WriteString("|RENAME")
|
||||
}
|
||||
if op&Chmod == Chmod {
|
||||
buffer.WriteString("|CHMOD")
|
||||
}
|
||||
if buffer.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
return buffer.String()[1:] // Strip leading pipe
|
||||
}
|
||||
|
||||
// String returns a string representation of the event in the form
|
||||
// "file: REMOVE|WRITE|..."
|
||||
func (e Event) String() string {
|
||||
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
|
||||
}
|
||||
|
||||
// Common errors that can be reported by a watcher
|
||||
var (
|
||||
ErrNonExistentWatch = errors.New("can't remove non-existent watcher")
|
||||
ErrEventOverflow = errors.New("fsnotify queue overflow")
|
||||
)
|
||||
|
||||
func (op Op) String() string {
|
||||
var b strings.Builder
|
||||
if op.Has(Create) {
|
||||
b.WriteString("|CREATE")
|
||||
}
|
||||
if op.Has(Remove) {
|
||||
b.WriteString("|REMOVE")
|
||||
}
|
||||
if op.Has(Write) {
|
||||
b.WriteString("|WRITE")
|
||||
}
|
||||
if op.Has(Rename) {
|
||||
b.WriteString("|RENAME")
|
||||
}
|
||||
if op.Has(Chmod) {
|
||||
b.WriteString("|CHMOD")
|
||||
}
|
||||
if b.Len() == 0 {
|
||||
return "[no events]"
|
||||
}
|
||||
return b.String()[1:]
|
||||
}
|
||||
|
||||
// Has reports if this operation has the given operation.
|
||||
func (o Op) Has(h Op) bool { return o&h == h }
|
||||
|
||||
// Has reports if this event has the given operation.
|
||||
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
||||
|
||||
// String returns a string representation of the event with its path.
|
||||
func (e Event) String() string {
|
||||
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
||||
}
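// Editorial sketch (not part of the vendored source): a typical consumer
// creates a Watcher, adds a directory, and checks Op with Has rather than ==,
// since Op is a bitmask and several operations may be set at once. The path
// "/tmp" is purely illustrative:
//
//	package main
//
//	import (
//		"log"
//
//		"github.com/fsnotify/fsnotify"
//	)
//
//	func main() {
//		w, err := fsnotify.NewWatcher()
//		if err != nil {
//			log.Fatal(err)
//		}
//		defer w.Close()
//
//		if err := w.Add("/tmp"); err != nil {
//			log.Fatal(err)
//		}
//
//		for {
//			select {
//			case ev, ok := <-w.Events:
//				if !ok {
//					return
//				}
//				if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
//					log.Println("changed:", ev.Name)
//				}
//			case err, ok := <-w.Errors:
//				if !ok {
//					return
//				}
//				log.Println("error:", err)
//			}
//		}
//	}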
|
||||
|
|
|
|||
|
|
@ -1,36 +0,0 @@
|
|||
// Copyright 2022 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows
|
||||
// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct{}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,351 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
mu sync.Mutex // Map access
|
||||
fd int
|
||||
poller *fdPoller
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create epoll
|
||||
poller, err := newFdPoller(fd)
|
||||
if err != nil {
|
||||
unix.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
poller: poller,
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
|
||||
// Wake up goroutine
|
||||
w.poller.wake()
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
|
||||
var flags uint32 = agnosticEvents
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watchEntry := w.watches[name]
|
||||
if watchEntry != nil {
|
||||
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
if watchEntry == nil {
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
} else {
|
||||
watchEntry.wd = uint32(wd)
|
||||
watchEntry.flags = flags
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||
}
|
||||
|
||||
// We successfully removed the watch if InotifyRmWatch doesn't return an
|
||||
// error; we need to clean up our internal state to ensure it matches
|
||||
// inotify's kernel state.
|
||||
delete(w.paths, int(watch.wd))
|
||||
delete(w.watches, name)
|
||||
|
||||
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||
// the inotify will already have been removed.
|
||||
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
|
||||
// by calling inotify_rm_watch() below; e.g. the readEvents() goroutine receives IN_IGNORED
|
||||
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||
// by another thread and we have not yet received the IN_IGNORED event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||
// the only two possible errors are:
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns the directories and files that are being monitored.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
n int // Number of bytes read with read()
|
||||
errno error // Syscall errno
|
||||
ok bool // For poller.wait
|
||||
)
|
||||
|
||||
defer close(w.doneResp)
|
||||
defer close(w.Errors)
|
||||
defer close(w.Events)
|
||||
defer unix.Close(w.fd)
|
||||
defer w.poller.close()
|
||||
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ok, errno = w.poller.wait()
|
||||
if errno != nil {
|
||||
select {
|
||||
case w.Errors <- errno:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
n, errno = unix.Read(w.fd, buf[:])
|
||||
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
|
||||
// unix.Read might have been woken up by Close. If so, we're done.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
// EOF was received, which should really never happen.
|
||||
err = io.EOF
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
}
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
mask := uint32(raw.Mask)
|
||||
nameLen := uint32(raw.Len)
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
select {
|
||||
case w.Errors <- ErrEventOverflow:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name, ok := w.paths[int(raw.Wd)]
|
||||
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||
// with the inotify kernel state which has already deleted the watch
|
||||
// automatically.
|
||||
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
delete(w.paths, int(raw.Wd))
|
||||
delete(w.watches, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if !event.ignoreLinux(mask) {
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Events
|
||||
// channel, such as events marked as ignored by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *Event) ignoreLinux(mask uint32) bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on an inotify mask.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
|
@ -1,187 +0,0 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fdPoller struct {
|
||||
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||
epfd int // Epoll file descriptor
|
||||
pipe [2]int // Pipe for waking up
|
||||
}
|
||||
|
||||
func emptyPoller(fd int) *fdPoller {
|
||||
poller := new(fdPoller)
|
||||
poller.fd = fd
|
||||
poller.epfd = -1
|
||||
poller.pipe[0] = -1
|
||||
poller.pipe[1] = -1
|
||||
return poller
|
||||
}
|
||||
|
||||
// Create a new inotify poller.
|
||||
// This creates an inotify handler, and an epoll handler.
|
||||
func newFdPoller(fd int) (*fdPoller, error) {
|
||||
var errno error
|
||||
poller := emptyPoller(fd)
|
||||
defer func() {
|
||||
if errno != nil {
|
||||
poller.close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Create epoll fd
|
||||
poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
|
||||
if poller.epfd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
|
||||
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register inotify fd with epoll
|
||||
event := unix.EpollEvent{
|
||||
Fd: int32(poller.fd),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register pipe fd with epoll
|
||||
event = unix.EpollEvent{
|
||||
Fd: int32(poller.pipe[0]),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
return poller, nil
|
||||
}
|
||||
|
||||
// Wait using epoll.
|
||||
// Returns true if something is ready to be read,
|
||||
// false if there is not.
|
||||
func (poller *fdPoller) wait() (bool, error) {
|
||||
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
|
||||
// I don't know whether epoll_wait returns the number of events returned,
|
||||
// or the total number of events ready.
|
||||
// I decided to catch both by making the buffer one larger than the maximum.
|
||||
events := make([]unix.EpollEvent, 7)
|
||||
for {
|
||||
n, errno := unix.EpollWait(poller.epfd, events, -1)
|
||||
if n == -1 {
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
return false, errno
|
||||
}
|
||||
if n == 0 {
|
||||
// If there are no events, try again.
|
||||
continue
|
||||
}
|
||||
if n > 6 {
|
||||
// This should never happen. More events were returned than should be possible.
|
||||
return false, errors.New("epoll_wait returned more events than I know what to do with")
|
||||
}
|
||||
ready := events[:n]
|
||||
epollhup := false
|
||||
epollerr := false
|
||||
epollin := false
|
||||
for _, event := range ready {
|
||||
if event.Fd == int32(poller.fd) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// This should not happen, but if it does, treat it as a wakeup.
|
||||
epollhup = true
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the file descriptor, we should pretend
|
||||
// something is ready to read, and let unix.Read pick up the error.
|
||||
epollerr = true
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// There is data to read.
|
||||
epollin = true
|
||||
}
|
||||
}
|
||||
if event.Fd == int32(poller.pipe[0]) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// Write pipe descriptor was closed, by us. This means we're closing down the
|
||||
// watcher, and we should wake up.
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// An error is waiting on the pipe file descriptor.
|
||||
// This is an absolute mystery, and should never ever happen.
|
||||
return false, errors.New("Error on the pipe descriptor.")
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// This is a regular wakeup, so we have to clear the buffer.
|
||||
err := poller.clearWake()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if epollhup || epollerr || epollin {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Close the write end of the poller.
|
||||
func (poller *fdPoller) wake() error {
|
||||
buf := make([]byte, 1)
|
||||
n, errno := unix.Write(poller.pipe[1], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is full, poller will wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (poller *fdPoller) clearWake() error {
|
||||
// You have to be woken up a LOT in order to get to 100!
|
||||
buf := make([]byte, 100)
|
||||
n, errno := unix.Read(poller.pipe[0], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is empty, someone else cleared our wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
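// Editorial sketch (not part of the vendored source): wake and clearWake
// implement the classic self-pipe trick -- a blocking epoll_wait can be
// interrupted at any moment by writing a byte to the pipe's write end. A
// minimal standalone version on Linux, using golang.org/x/sys/unix:
//
//	p := make([]int, 2)
//	if err := unix.Pipe2(p, unix.O_NONBLOCK|unix.O_CLOEXEC); err != nil {
//		panic(err)
//	}
//	epfd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
//	if err != nil {
//		panic(err)
//	}
//	ev := unix.EpollEvent{Fd: int32(p[0]), Events: unix.EPOLLIN}
//	if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, p[0], &ev); err != nil {
//		panic(err)
//	}
//
//	// Another goroutine wakes the waiter by writing a single byte.
//	go func() { unix.Write(p[1], []byte{0}) }()
//
//	events := make([]unix.EpollEvent, 1)
//	unix.EpollWait(epfd, events, -1) // returns once the byte arrives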
|
||||
|
||||
// Close all poller file descriptors, but not the one passed to it.
|
||||
func (poller *fdPoller) close() {
|
||||
if poller.pipe[1] != -1 {
|
||||
unix.Close(poller.pipe[1])
|
||||
}
|
||||
if poller.pipe[0] != -1 {
|
||||
unix.Close(poller.pipe[0])
|
||||
}
|
||||
if poller.epfd != -1 {
|
||||
unix.Close(poller.epfd)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,535 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
|
||||
mu sync.Mutex // Protects access to watcher data
|
||||
watches map[string]int // Map of watched file descriptors (key: path).
|
||||
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
|
||||
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
|
||||
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
type pathInfo struct {
|
||||
name string
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
kq, err := kqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
kq: kq,
|
||||
watches: make(map[string]int),
|
||||
dirFlags: make(map[string]uint32),
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// copy paths to remove while locked
|
||||
var pathsToRemove = make([]string, 0, len(w.watches))
|
||||
for name := range w.watches {
|
||||
pathsToRemove = append(pathsToRemove, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
// unlock before calling Remove, which also locks
|
||||
|
||||
for _, name := range pathsToRemove {
|
||||
w.Remove(name)
|
||||
}
|
||||
|
||||
// send a "quit" message to the reader goroutine
|
||||
close(w.done)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
w.externalWatches[name] = true
|
||||
w.mu.Unlock()
|
||||
_, err := w.addWatch(name, noteAllEvents)
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
w.mu.Lock()
|
||||
watchfd, ok := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
||||
}
|
||||
|
||||
const registerRemove = unix.EV_DELETE
|
||||
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(watchfd)
|
||||
|
||||
w.mu.Lock()
|
||||
isDir := w.paths[watchfd].isDir
|
||||
delete(w.watches, name)
|
||||
delete(w.paths, watchfd)
|
||||
delete(w.dirFlags, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if isDir {
|
||||
var pathsToRemove []string
|
||||
w.mu.Lock()
|
||||
for _, path := range w.paths {
|
||||
wdir, _ := filepath.Split(path.name)
|
||||
if filepath.Clean(wdir) == name {
|
||||
if !w.externalWatches[path.name] {
|
||||
pathsToRemove = append(pathsToRemove, path.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WatchList returns the directories and files that are being monitored.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for pathname := range w.watches {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// keventWaitTime to block on each read from kevent
|
||||
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||
|
||||
// addWatch adds name to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
var isDir bool
|
||||
// Make ./name and name equivalent
|
||||
name = filepath.Clean(name)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return "", errors.New("kevent instance already closed")
|
||||
}
|
||||
watchfd, alreadyWatching := w.watches[name]
|
||||
// We already have a watch, but we can still override flags.
|
||||
if alreadyWatching {
|
||||
isDir = w.paths[watchfd].isDir
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets.
|
||||
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Don't watch named pipes.
|
||||
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||
// consistency, we will act like everything is fine. There will simply
|
||||
// be no file events for broken symlinks.
|
||||
// Hence the returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
name, err = filepath.EvalSymlinks(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
_, alreadyWatching = w.watches[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
if alreadyWatching {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
watchfd, err = unix.Open(name, openMode, 0700)
|
||||
if watchfd == -1 {
|
||||
return "", err
|
||||
}
|
||||
|
||||
isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
|
||||
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
||||
unix.Close(watchfd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.mu.Lock()
|
||||
w.watches[name] = watchfd
|
||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Watch the directory if it has not been watched before,
|
||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
w.mu.Lock()
|
||||
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
// Store flags so this watch can be updated later
|
||||
w.dirFlags[name] = flags
|
||||
w.mu.Unlock()
|
||||
|
||||
if watchDir {
|
||||
if err := w.watchDirectoryFiles(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
|
||||
loop:
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
break loop
|
||||
default:
|
||||
}
|
||||
|
||||
// Get new events
|
||||
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
break loop
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Flush the events we received to the Events channel
|
||||
for len(kevents) > 0 {
|
||||
kevent := &kevents[0]
|
||||
watchfd := int(kevent.Ident)
|
||||
mask := uint32(kevent.Fflags)
|
||||
w.mu.Lock()
|
||||
path := w.paths[watchfd]
|
||||
w.mu.Unlock()
|
||||
event := newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !(event.Op&Remove == Remove) {
|
||||
// Double check to make sure the directory exists. This can happen when
|
||||
// we do a rm -fr on recursively watched folders and we receive a
|
||||
// modification event first but the folder has been deleted and later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
// mark it as a delete event
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||
w.Remove(event.Name)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
w.sendDirectoryChangeEvents(event.Name)
|
||||
} else {
|
||||
// Send the event on the Events channel.
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Remove == Remove {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); err == nil {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filePath := filepath.Clean(event.Name)
|
||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
kevents = kevents[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup
|
||||
err := unix.Close(w.kq)
|
||||
if err != nil {
|
||||
// The only way the previous loop can break is if w.done was closed, so we need a non-blocking send to w.Errors.
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on kqueue Fflags.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func newCreateEvent(name string) Event {
|
||||
return Event{Name: name, Op: Create}
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryChangeEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match Linux inotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
if !doesExist {
|
||||
// Send create event
|
||||
select {
|
||||
case w.Events <- newCreateEvent(filePath):
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||
return w.addWatch(name, flags)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||
func kqueue() (kq int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, err
|
||||
}
|
||||
return kq, nil
|
||||
}
|
||||
|
||||
// register events with the queue
|
||||
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types:
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// register the events
|
||||
success, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(kq, nil, events, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
// durationToTimespec prepares a timeout value
|
||||
func durationToTimespec(d time.Duration) unix.Timespec {
|
||||
return unix.NsecToTimespec(d.Nanoseconds())
|
||||
}
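// Editorial sketch (not part of the vendored source): the helpers above can
// be combined into a minimal one-shot kqueue watch. The path is illustrative
// and error handling is elided:
//
//	kq, _ := kqueue()
//	fd, _ := unix.Open("/tmp/watched", openMode, 0700)
//	_ = register(kq, []int{fd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, noteAllEvents)
//
//	ts := durationToTimespec(100 * time.Millisecond)
//	kevents, _ := read(kq, make([]unix.Kevent_t, 10), &ts)
//	for _, kev := range kevents {
//		fmt.Printf("fd %d fflags %#x -> %s\n", kev.Ident, kev.Fflags, newEvent("/tmp/watched", uint32(kev.Fflags)))
//	}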
|
||||
|
|
@ -0,0 +1,208 @@
|
|||
#!/usr/bin/env zsh
|
||||
[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1
|
||||
setopt err_exit no_unset pipefail extended_glob
|
||||
|
||||
# Simple script to update the godoc comments on all watchers. Probably took me
|
||||
# more time to write this than doing it manually, but ah well 🙃
|
||||
|
||||
watcher=$(<<EOF
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run into your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # macOS notes
|
||||
//
|
||||
// Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
// temporary workaround is to add your folder(s) to the "Spotlight Privacy
|
||||
// Settings" until we have a native FSEvents implementation (see [#11]).
|
||||
//
|
||||
// [#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
// [#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
EOF
|
||||
)
|
||||
|
||||
new=$(<<EOF
|
||||
// NewWatcher creates a new Watcher.
|
||||
EOF
|
||||
)
|
||||
|
||||
add=$(<<EOF
|
||||
// Add starts monitoring the path for changes.
|
||||
//
|
||||
// A path can only be watched once; attempting to watch it more than once will
|
||||
// return an error. Paths that do not yet exist on the filesystem cannot be
|
||||
// added. A watch will be automatically removed if the path is deleted.
|
||||
//
|
||||
// A path will remain watched if it gets renamed to somewhere else on the same
|
||||
// filesystem, but the monitor will get removed if the path gets deleted and
|
||||
// re-created, or if it's moved to a different filesystem.
|
||||
//
|
||||
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
|
||||
// filesystems (/proc, /sys, etc.) generally don't work.
|
||||
//
|
||||
// # Watching directories
|
||||
//
|
||||
// All files in a directory are monitored, including new files that are created
|
||||
// after the watcher is started. Subdirectories are not watched (i.e. it's
|
||||
// non-recursive).
|
||||
//
|
||||
// # Watching files
|
||||
//
|
||||
// Watching individual files (rather than directories) is generally not
|
||||
// recommended as many tools update files atomically. Instead of "just" writing
|
||||
// to the file a temporary file will be written to first, and if successful the
|
||||
// temporary file is moved to the destination, removing the original, or some
|
||||
// variant thereof. The watcher on the original file is now lost, as it no
|
||||
// longer exists.
|
||||
//
|
||||
// Instead, watch the parent directory and use Event.Name to filter out files
|
||||
// you're not interested in. There is an example of this in [cmd/fsnotify/file.go].
|
||||
EOF
|
||||
)
|
||||
|
||||
remove=$(<<EOF
|
||||
// Remove stops monitoring the path for changes.
|
||||
//
|
||||
// Directories are always removed non-recursively. For example, if you added
|
||||
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
|
||||
//
|
||||
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
|
||||
EOF
|
||||
)
|
||||
|
||||
close=$(<<EOF
|
||||
// Close removes all watches and closes the events channel.
|
||||
EOF
|
||||
)
|
||||
|
||||
watchlist=$(<<EOF
|
||||
// WatchList returns all paths added with [Add] (and are not yet removed).
|
||||
EOF
|
||||
)
|
||||
|
||||
events=$(<<EOF
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, so you
|
||||
// probably want to wait until you've stopped receiving
|
||||
// them (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows
|
||||
// it's never sent.
|
||||
EOF
|
||||
)
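
# A rough, hypothetical sketch of the dedup approach the Write description
# above alludes to: coalesce bursts of Write events per path and only act
# after a short quiet period. The package name, DedupWrites helper, and the
# 100ms delay are all placeholders, not part of fsnotify:
#
# // Package fsutil is a hypothetical helper package.
# package fsutil
#
# import (
# 	"sync"
# 	"time"
#
# 	"github.com/fsnotify/fsnotify"
# )
#
# // DedupWrites calls act once per path after Write events have stopped
# // arriving for a short quiet period; the timer is reset on every event.
# func DedupWrites(w *fsnotify.Watcher, act func(name string)) {
# 	var mu sync.Mutex
# 	timers := map[string]*time.Timer{}
# 	for ev := range w.Events {
# 		if !ev.Has(fsnotify.Write) {
# 			continue
# 		}
# 		mu.Lock()
# 		if t, ok := timers[ev.Name]; ok {
# 			t.Reset(100 * time.Millisecond)
# 		} else {
# 			name := ev.Name
# 			timers[name] = time.AfterFunc(100*time.Millisecond, func() { act(name) })
# 		}
# 		mu.Unlock()
# 	}
# }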
|
||||
|
||||
errors=$(<<EOF
|
||||
// Errors sends any errors.
|
||||
EOF
|
||||
)
|
||||
|
||||
set-cmt() {
|
||||
local pat=$1
|
||||
local cmt=$2
|
||||
|
||||
IFS=$'\n' local files=($(grep -n $pat backend_*~*_test.go))
|
||||
for f in $files; do
|
||||
IFS=':' local fields=($=f)
|
||||
local file=$fields[1]
|
||||
local end=$(( $fields[2] - 1 ))
|
||||
|
||||
# Find start of comment.
|
||||
local start=0
|
||||
IFS=$'\n' local lines=($(head -n$end $file))
|
||||
for (( i = 1; i <= $#lines; i++ )); do
|
||||
local line=$lines[-$i]
|
||||
if ! grep -q '^[[:space:]]*//' <<<$line; then
|
||||
start=$(( end - (i - 2) ))
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
head -n $(( start - 1 )) $file >/tmp/x
|
||||
print -r -- $cmt >>/tmp/x
|
||||
tail -n+$(( end + 1 )) $file >>/tmp/x
|
||||
mv /tmp/x $file
|
||||
done
|
||||
}
|
||||
|
||||
set-cmt '^type Watcher struct ' $watcher
|
||||
set-cmt '^func NewWatcher(' $new
|
||||
set-cmt '^func (w \*Watcher) Add(' $add
|
||||
set-cmt '^func (w \*Watcher) Remove(' $remove
|
||||
set-cmt '^func (w \*Watcher) Close(' $close
|
||||
set-cmt '^func (w \*Watcher) WatchList(' $watchlist
|
||||
set-cmt '^[[:space:]]*Events *chan Event$' $events
|
||||
set-cmt '^[[:space:]]*Errors *chan error$' $errors
|
||||
|
|
@ -1,7 +1,3 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
|
||||
|
|
@ -1,7 +1,3 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build darwin
|
||||
// +build darwin
|
||||
|
||||
|
|
@ -1,586 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
mu sync.Mutex // Map access
|
||||
port syscall.Handle // Handle to completion port
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// WatchList returns the directories and files that are being monitored.
|
||||
func (w *Watcher) WatchList() []string {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for _, entry := range w.watches {
|
||||
for _, watchEntry := range entry {
|
||||
entries = append(entries, watchEntry.path)
|
||||
}
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
const (
|
||||
// Options for AddWatch
|
||||
sysFSONESHOT = 0x80000000
|
||||
sysFSONLYDIR = 0x1000000
|
||||
|
||||
// Events
|
||||
sysFSACCESS = 0x1
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSATTRIB = 0x4
|
||||
sysFSCLOSE = 0x18
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
|
||||
// Special events
|
||||
sysFSIGNORED = 0x8000
|
||||
sysFSQOVERFLOW = 0x4000
|
||||
)
|
||||
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle syscall.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov syscall.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [4096]byte
|
||||
}
|
||||
|
||||
type indexMap map[uint64]*watch
|
||||
type watchMap map[uint32]indexMap
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if e != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDir(pathname string) (dir string, err error) {
|
||||
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||
if e != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||
}
|
||||
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getIno(path string) (ino *inode, err error) {
|
||||
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||
syscall.FILE_LIST_DIRECTORY,
|
||||
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||
nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", e)
|
||||
}
|
||||
var fi syscall.ByHandleFileInformation
|
||||
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||
syscall.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if flags&sysFSONLYDIR != 0 && pathname != dir {
|
||||
return nil
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
if err = w.startRead(watchEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watch == nil {
|
||||
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CancelIo", e)
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
if e != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case syscall.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.Events <- newEvent("", sysFSQOVERFLOW)
|
||||
w.Errors <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
// TODO: Consider using unsafe.Slice that is available from go1.17
|
||||
// https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang
|
||||
// instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name
|
||||
size := int(raw.FileNameLength / 2)
|
||||
var buf []uint16
|
||||
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
|
||||
sh.Len = size
|
||||
sh.Cap = size
|
||||
name := syscall.UTF16ToString(buf)
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sysFSONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
event := newEvent(name, uint32(mask))
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSACCESS != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||
}
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case syscall.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
|
@ -18,6 +18,7 @@ package retry
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/retry/wait"
|
||||
|
|
@ -36,7 +37,7 @@ type temporary interface {
|
|||
|
||||
// IsTemporary returns true if err implements Temporary() and it returns true.
|
||||
func IsTemporary(err error) bool {
|
||||
if err == context.DeadlineExceeded {
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
return false
|
||||
}
|
||||
if te, ok := err.(temporary); ok && te.Temporary() {
|
||||
|
|
|
|||
3
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
generated
vendored
|
|
@ -87,6 +87,9 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
|
|||
|
||||
// If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope.
|
||||
if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 {
|
||||
// close out old response, since we will not return it.
|
||||
res.Body.Close()
|
||||
|
||||
newScopes := []string{}
|
||||
for _, wac := range challenges {
|
||||
// TODO(jonjohnsonjr): Should we also update "realm" or "service"?
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
{
|
||||
"v2": "2.6.0"
|
||||
"v2": "2.7.0"
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,5 +1,13 @@
|
|||
# Changelog
|
||||
|
||||
## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6))
|
||||
* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f))
|
||||
|
||||
## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -233,30 +233,49 @@ func (a *APIError) Metadata() map[string]string {
|
|||
|
||||
}
|
||||
|
||||
// FromError parses a Status error or a googleapi.Error and builds an APIError.
|
||||
func FromError(err error) (*APIError, bool) {
|
||||
if err == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
ae := APIError{err: err}
|
||||
// setDetailsFromError parses a Status error or a googleapi.Error
|
||||
// and sets status and details or httpErr and details, respectively.
|
||||
// It returns false if neither Status nor googleapi.Error can be parsed.
|
||||
func (a *APIError) setDetailsFromError(err error) bool {
|
||||
st, isStatus := status.FromError(err)
|
||||
var herr *googleapi.Error
|
||||
isHTTPErr := errors.As(err, &herr)
|
||||
|
||||
switch {
|
||||
case isStatus:
|
||||
ae.status = st
|
||||
ae.details = parseDetails(st.Details())
|
||||
a.status = st
|
||||
a.details = parseDetails(st.Details())
|
||||
case isHTTPErr:
|
||||
ae.httpErr = herr
|
||||
ae.details = parseHTTPDetails(herr)
|
||||
a.httpErr = herr
|
||||
a.details = parseHTTPDetails(herr)
|
||||
default:
|
||||
return nil, false
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return &ae, true
|
||||
// FromError parses a Status error or a googleapi.Error and builds an
|
||||
// APIError, wrapping the provided error in the new APIError. It
|
||||
// returns false if neither Status nor googleapi.Error can be parsed.
|
||||
func FromError(err error) (*APIError, bool) {
|
||||
return ParseError(err, true)
|
||||
}
|
||||
|
||||
// ParseError parses a Status error or a googleapi.Error and builds an
|
||||
// APIError. If wrap is true, it wraps the error in the new APIError.
|
||||
// It returns false if neither Status nor googleapi.Error can be parsed.
|
||||
func ParseError(err error, wrap bool) (*APIError, bool) {
|
||||
if err == nil {
|
||||
return nil, false
|
||||
}
|
||||
ae := APIError{}
|
||||
if wrap {
|
||||
ae = APIError{err: err}
|
||||
}
|
||||
if !ae.setDetailsFromError(err) {
|
||||
return nil, false
|
||||
}
|
||||
return &ae, true
|
||||
}
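
// Illustrative only: a hedged sketch of how a caller might use the new
// FromError/ParseError pair after this change. The gRPC status construction
// below is just an example input, not something from this diff.
//
// package main
//
// import (
// 	"fmt"
//
// 	"github.com/googleapis/gax-go/v2/apierror"
// 	"google.golang.org/grpc/codes"
// 	"google.golang.org/grpc/status"
// )
//
// func main() {
// 	err := status.Error(codes.NotFound, "resource missing")
//
// 	// FromError wraps err inside the returned *APIError; ParseError with
// 	// wrap=false builds the APIError without keeping a reference to err.
// 	if ae, ok := apierror.FromError(err); ok {
// 		fmt.Println(ae.GRPCStatus().Code()) // NotFound
// 	}
// 	if ae, ok := apierror.ParseError(err, false); ok {
// 		fmt.Println(ae.GRPCStatus().Code()) // NotFound
// 	}
// }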
|
||||
|
||||
// parseDetails accepts a slice of interface{} that should be backed by some
|
||||
|
|
|
|||
|
|
@ -30,4 +30,4 @@
|
|||
package internal
|
||||
|
||||
// Version is the current tagged release of the library.
|
||||
const Version = "2.6.0"
|
||||
const Version = "2.7.0"
|
||||
|
|
|
|||
|
|
@ -1,5 +1,17 @@
|
|||
# HCL Changelog
|
||||
|
||||
## v2.15.0 (November 10, 2022)
|
||||
|
||||
### Bugs Fixed
|
||||
|
||||
* ext/typeexpr: Skip null objects when applying defaults. This prevents crashes when null objects are created inside collections, and stops incomplete objects being created with only optional attributes set. ([#567](https://github.com/hashicorp/hcl/pull/567))
|
||||
* ext/typeexpr: Ensure default values do not have optional metadata attached. This prevents crashes when default values are inserted into concrete go-cty values that have also been stripped of their optional metadata. ([#568](https://github.com/hashicorp/hcl/pull/568))
|
||||
|
||||
### Enhancements
|
||||
|
||||
* ext/typeexpr: With the [go-cty](https://github.com/zclconf/go-cty) upstream dependency updated to v1.12.0, the `Defaults` struct and associated functions can apply additional and more flexible 'unsafe' conversions (examples include tuples into collections such as lists and sets, and additional safety around null and dynamic values). ([#564](https://github.com/hashicorp/hcl/pull/564))
|
||||
* ext/typeexpr: With the [go-cty](https://github.com/zclconf/go-cty) upstream dependency updated to v1.12.0, users should now apply the go-cty convert functionality *before* setting defaults on a given `cty.Value`, rather than after, if they require a specific `cty.Type`. ([#564](https://github.com/hashicorp/hcl/pull/564))
|
||||
|
||||
## v2.14.1 (September 23, 2022)
|
||||
|
||||
### Bugs Fixed
|
||||
|
|
|
|||
|
|
@ -84,7 +84,7 @@ Comments serve as program documentation and come in two forms:
|
|||
sequence, and may have any characters within except the ending sequence.
|
||||
An inline comment is considered equivalent to a whitespace sequence.
|
||||
|
||||
Comments and whitespace cannot begin within within other comments, or within
|
||||
Comments and whitespace cannot begin within other comments, or within
|
||||
template literals except inside an interpolation sequence or template directive.
|
||||
|
||||
### Identifiers
|
||||
|
|
|
|||
|
|
@ -2,4 +2,4 @@
|
|||
package hcloud
|
||||
|
||||
// Version is the library's version following Semantic Versioning.
|
||||
const Version = "1.35.2"
|
||||
const Version = "1.37.0"
|
||||
|
|
|
|||
|
|
@ -90,12 +90,6 @@ type PrimaryIPUpdateOpts struct {
|
|||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
// PrimaryIPUpdateResult defines the response
|
||||
// when updating a Primary IP.
|
||||
type PrimaryIPUpdateResult struct {
|
||||
PrimaryIP PrimaryIP `json:"primary_ip"`
|
||||
}
|
||||
|
||||
// PrimaryIPAssignOpts defines the request to
|
||||
// assign a Primary IP to an assignee (usually a server).
|
||||
type PrimaryIPAssignOpts struct {
|
||||
|
|
@ -240,10 +234,12 @@ func (c *PrimaryIPClient) List(ctx context.Context, opts PrimaryIPListOpts) ([]*
|
|||
|
||||
// All returns all Primary IPs.
|
||||
func (c *PrimaryIPClient) All(ctx context.Context) ([]*PrimaryIP, error) {
|
||||
allPrimaryIPs := []*PrimaryIP{}
|
||||
return c.AllWithOpts(ctx, PrimaryIPListOpts{ListOpts: ListOpts{PerPage: 50}})
|
||||
}
|
||||
|
||||
opts := PrimaryIPListOpts{}
|
||||
opts.PerPage = 50
|
||||
// AllWithOpts returns all Primary IPs for the given options.
|
||||
func (c *PrimaryIPClient) AllWithOpts(ctx context.Context, opts PrimaryIPListOpts) ([]*PrimaryIP, error) {
|
||||
var allPrimaryIPs []*PrimaryIP
|
||||
|
||||
err := c.client.all(func(page int) (*Response, error) {
|
||||
opts.Page = page
|
||||
|
|
@ -311,12 +307,12 @@ func (c *PrimaryIPClient) Update(ctx context.Context, primaryIP *PrimaryIP, reqB
|
|||
return nil, nil, err
|
||||
}
|
||||
|
||||
respBody := PrimaryIPUpdateResult{}
|
||||
var respBody schema.PrimaryIPUpdateResult
|
||||
resp, err := c.client.Do(req, &respBody)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return &respBody.PrimaryIP, resp, nil
|
||||
return PrimaryIPFromSchema(respBody.PrimaryIP), resp, nil
|
||||
}
|
||||
|
||||
// Assign a Primary IP to a resource
|
||||
|
|
|
|||
|
|
@ -47,3 +47,9 @@ type PrimaryIPGetResult struct {
|
|||
type PrimaryIPListResult struct {
|
||||
PrimaryIPs []PrimaryIP `json:"primary_ips"`
|
||||
}
|
||||
|
||||
// PrimaryIPUpdateResult defines the response
|
||||
// when updating a Primary IP.
|
||||
type PrimaryIPUpdateResult struct {
|
||||
PrimaryIP PrimaryIP `json:"primary_ip"`
|
||||
}
|
||||
|
|
|
|||
|
|
@ -136,6 +136,12 @@ type ServerCreateResponse struct {
|
|||
NextActions []Action `json:"next_actions"`
|
||||
}
|
||||
|
||||
// ServerDeleteResponse defines the schema of the response when
|
||||
// deleting a server.
|
||||
type ServerDeleteResponse struct {
|
||||
Action Action `json:"action"`
|
||||
}
|
||||
|
||||
// ServerUpdateRequest defines the schema of the request to update a server.
|
||||
type ServerUpdateRequest struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
|
|
|
|||
|
|
@ -458,13 +458,34 @@ func (c *ServerClient) Create(ctx context.Context, opts ServerCreateOpts) (Serve
|
|||
return result, resp, nil
|
||||
}
|
||||
|
||||
// ServerDeleteResult is the result of a delete server call.
|
||||
type ServerDeleteResult struct {
|
||||
Action *Action
|
||||
}
|
||||
|
||||
// Delete deletes a server.
|
||||
// This method is deprecated, use ServerClient.DeleteWithResult instead.
|
||||
func (c *ServerClient) Delete(ctx context.Context, server *Server) (*Response, error) {
|
||||
_, resp, err := c.DeleteWithResult(ctx, server)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// DeleteWithResult deletes a server and returns the parsed response containing the action.
|
||||
func (c *ServerClient) DeleteWithResult(ctx context.Context, server *Server) (*ServerDeleteResult, *Response, error) {
|
||||
req, err := c.client.NewRequest(ctx, "DELETE", fmt.Sprintf("/servers/%d", server.ID), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return &ServerDeleteResult{}, nil, err
|
||||
}
|
||||
return c.client.Do(req, nil)
|
||||
|
||||
var respBody schema.ServerDeleteResponse
|
||||
resp, err := c.client.Do(req, &respBody)
|
||||
if err != nil {
|
||||
return &ServerDeleteResult{}, resp, err
|
||||
}
|
||||
|
||||
return &ServerDeleteResult{
|
||||
Action: ActionFromSchema(respBody.Action),
|
||||
}, resp, nil
|
||||
}
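
// Illustrative only: how a caller might move from the deprecated Delete to
// DeleteWithResult to get at the returned action. The token and server name
// are placeholders, and error handling is trimmed to keep the sketch short.
//
// package main
//
// import (
// 	"context"
// 	"fmt"
// 	"log"
//
// 	"github.com/hetznercloud/hcloud-go/hcloud"
// )
//
// func main() {
// 	client := hcloud.NewClient(hcloud.WithToken("TOKEN")) // placeholder token
//
// 	server, _, err := client.Server.GetByName(context.Background(), "my-server")
// 	if err != nil || server == nil {
// 		log.Fatal("server lookup failed")
// 	}
//
// 	// Old: _, err = client.Server.Delete(ctx, server)
// 	// New: also returns the delete action so progress can be tracked.
// 	result, _, err := client.Server.DeleteWithResult(context.Background(), server)
// 	if err != nil {
// 		log.Fatal(err)
// 	}
// 	fmt.Println("delete action ID:", result.Action.ID)
// }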
|
||||
|
||||
// ServerUpdateOpts specifies options for updating a server.
|
||||
|
|
|
|||
|
|
@ -246,6 +246,7 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||
// error allows shortcuts like
|
||||
//
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
|
||||
c, err := v.GetMetricWithLabelValues(lvs...)
|
||||
|
|
@ -257,6 +258,7 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
|
|||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. Not returning an error allows shortcuts like
|
||||
//
|
||||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (v *CounterVec) With(labels Labels) Counter {
|
||||
c, err := v.GetMetricWith(labels)
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@
|
|||
// All exported functions and methods are safe to be used concurrently unless
|
||||
// specified otherwise.
|
||||
//
|
||||
// A Basic Example
|
||||
// # A Basic Example
|
||||
//
|
||||
// As a starting point, a very basic usage example:
|
||||
//
|
||||
|
|
@ -35,41 +35,52 @@
|
|||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
// )
|
||||
//
|
||||
// var (
|
||||
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
// type metrics struct {
|
||||
// cpuTemp prometheus.Gauge
|
||||
// hdFailures *prometheus.CounterVec
|
||||
// }
|
||||
//
|
||||
// func NewMetrics(reg prometheus.Registerer) *metrics {
|
||||
// m := &metrics{
|
||||
// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
// Name: "cpu_temperature_celsius",
|
||||
// Help: "Current temperature of the CPU.",
|
||||
// })
|
||||
// hdFailures = prometheus.NewCounterVec(
|
||||
// }),
|
||||
// hdFailures: prometheus.NewCounterVec(
|
||||
// prometheus.CounterOpts{
|
||||
// Name: "hd_errors_total",
|
||||
// Help: "Number of hard-disk errors.",
|
||||
// },
|
||||
// []string{"device"},
|
||||
// )
|
||||
// )
|
||||
//
|
||||
// func init() {
|
||||
// // Metrics have to be registered to be exposed:
|
||||
// prometheus.MustRegister(cpuTemp)
|
||||
// prometheus.MustRegister(hdFailures)
|
||||
// ),
|
||||
// }
|
||||
// reg.MustRegister(m.cpuTemp)
|
||||
// reg.MustRegister(m.hdFailures)
|
||||
// return m
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// cpuTemp.Set(65.3)
|
||||
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
||||
// // Create a non-global registry.
|
||||
// reg := prometheus.NewRegistry()
|
||||
//
|
||||
// // The Handler function provides a default handler to expose metrics
|
||||
// // via an HTTP server. "/metrics" is the usual endpoint for that.
|
||||
// http.Handle("/metrics", promhttp.Handler())
|
||||
// // Create new metrics and register them using the custom registry.
|
||||
// m := NewMetrics(reg)
|
||||
// // Set values for the new created metrics.
|
||||
// m.cpuTemp.Set(65.3)
|
||||
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
||||
//
|
||||
// // Expose metrics and custom registry via an HTTP server
|
||||
// // using the HandlerFor function. "/metrics" is the usual endpoint for that.
|
||||
// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
|
||||
// log.Fatal(http.ListenAndServe(":8080", nil))
|
||||
// }
|
||||
//
|
||||
//
|
||||
// This is a complete program that exports two metrics, a Gauge and a Counter,
|
||||
// the latter with a label attached to turn it into a (one-dimensional) vector.
|
||||
// It registers the metrics using a custom registry and exposes them via an HTTP server
|
||||
// on the /metrics endpoint.
|
||||
//
|
||||
// Metrics
|
||||
// # Metrics
|
||||
//
|
||||
// The number of exported identifiers in this package might appear a bit
|
||||
// overwhelming. However, in addition to the basic plumbing shown in the example
|
||||
|
|
@ -100,7 +111,7 @@
|
|||
// To create instances of Metrics and their vector versions, you need a suitable
|
||||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
|
||||
//
|
||||
// Custom Collectors and constant Metrics
|
||||
// # Custom Collectors and constant Metrics
|
||||
//
|
||||
// While you could create your own implementations of Metric, most likely you
|
||||
// will only ever implement the Collector interface on your own. At a first
|
||||
|
|
@ -141,7 +152,7 @@
|
|||
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
|
||||
// shortcuts.
|
||||
//
|
||||
// Advanced Uses of the Registry
|
||||
// # Advanced Uses of the Registry
|
||||
//
|
||||
// While MustRegister is the by far most common way of registering a Collector,
|
||||
// sometimes you might want to handle the errors the registration might cause.
|
||||
|
|
@ -176,23 +187,23 @@
|
|||
// NewProcessCollector). With a custom registry, you are in control and decide
|
||||
// yourself about the Collectors to register.
|
||||
//
|
||||
// HTTP Exposition
|
||||
// # HTTP Exposition
|
||||
//
|
||||
// The Registry implements the Gatherer interface. The caller of the Gather
|
||||
// method can then expose the gathered metrics in some way. Usually, the metrics
|
||||
// are served via HTTP on the /metrics endpoint. That's happening in the example
|
||||
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
|
||||
//
|
||||
// Pushing to the Pushgateway
|
||||
// # Pushing to the Pushgateway
|
||||
//
|
||||
// Function for pushing to the Pushgateway can be found in the push sub-package.
|
||||
//
|
||||
// Graphite Bridge
|
||||
// # Graphite Bridge
|
||||
//
|
||||
// Functions and examples to push metrics from a Gatherer to Graphite can be
|
||||
// found in the graphite sub-package.
|
||||
//
|
||||
// Other Means of Exposition
|
||||
// # Other Means of Exposition
|
||||
//
|
||||
// More ways of exposing metrics can easily be added by following the approaches
|
||||
// of the existing implementations.
|
||||
|
|
|
|||
|
|
@ -210,6 +210,7 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||
// error allows shortcuts like
|
||||
//
|
||||
// myVec.WithLabelValues("404", "GET").Add(42)
|
||||
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
|
||||
g, err := v.GetMetricWithLabelValues(lvs...)
|
||||
|
|
@ -221,6 +222,7 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
|
|||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. Not returning an error allows shortcuts like
|
||||
//
|
||||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
func (v *GaugeVec) With(labels Labels) Gauge {
|
||||
g, err := v.GetMetricWith(labels)
|
||||
|
|
|
|||
File diff suppressed because it is too large
60
vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
generated
vendored
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
// Copyright (c) 2015 Björn Rabenstein
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
//
|
||||
// The code in this package is copy/paste to avoid a dependency. Hence this file
|
||||
// carries the copyright of the original repo.
|
||||
// https://github.com/beorn7/floats
|
||||
package internal
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
// minNormalFloat64 is the smallest positive normal value of type float64.
|
||||
var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
|
||||
|
||||
// AlmostEqualFloat64 returns true if a and b are equal within a relative error
|
||||
// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
|
||||
// details of the applied method.
|
||||
func AlmostEqualFloat64(a, b, epsilon float64) bool {
|
||||
if a == b {
|
||||
return true
|
||||
}
|
||||
absA := math.Abs(a)
|
||||
absB := math.Abs(b)
|
||||
diff := math.Abs(a - b)
|
||||
if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
|
||||
return diff < epsilon*minNormalFloat64
|
||||
}
|
||||
return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
|
||||
}
|
||||
|
||||
// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
|
||||
func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if !AlmostEqualFloat64(a[i], b[i], epsilon) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
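
// Because this file lives in an internal package it cannot be imported from
// outside client_golang; the standalone snippet below only mirrors the
// comparison rule above to illustrate its semantics (the copy and the sample
// values are mine, not part of the vendored code).
//
// package main
//
// import (
// 	"fmt"
// 	"math"
// )
//
// // almostEqual mirrors AlmostEqualFloat64, copied here only because the
// // original is internal to client_golang.
// func almostEqual(a, b, epsilon float64) bool {
// 	minNormal := math.Float64frombits(0x0010000000000000)
// 	if a == b {
// 		return true
// 	}
// 	diff := math.Abs(a - b)
// 	if a == 0 || b == 0 || math.Abs(a)+math.Abs(b) < minNormal {
// 		return diff < epsilon*minNormal
// 	}
// 	return diff/math.Min(math.Abs(a)+math.Abs(b), math.MaxFloat64) < epsilon
// }
//
// func main() {
// 	fmt.Println(almostEqual(0.1+0.2, 0.3, 1e-9)) // true: relative error is tiny
// 	fmt.Println(almostEqual(1.0, 1.1, 1e-9))     // false
// }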
|
||||
|
|
@ -201,9 +201,12 @@ func (m *SequenceMatcher) isBJunk(s string) bool {
|
|||
// If IsJunk is not defined:
|
||||
//
|
||||
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||
//
|
||||
// alo <= i <= i+k <= ahi
|
||||
// blo <= j <= j+k <= bhi
|
||||
//
|
||||
// and for all (i',j',k') meeting those conditions,
|
||||
//
|
||||
// k >= k'
|
||||
// i <= i'
|
||||
// and if i == i', j <= j'
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import (
|
|||
// Labels represents a collection of label name -> value mappings. This type is
|
||||
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
|
||||
// metric vector Collectors, e.g.:
|
||||
//
|
||||
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
|
||||
//
|
||||
// The other use-case is the specification of constant label pairs in Opts or to
|
||||
|
|
|
|||
4
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
generated
vendored
|
|
@ -73,7 +73,7 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
|
|||
return func(r *http.Request) (*http.Response, error) {
|
||||
resp, err := next.RoundTrip(r)
|
||||
if err == nil {
|
||||
exemplarAdd(
|
||||
addWithExemplar(
|
||||
counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
|
||||
1,
|
||||
rtOpts.getExemplarFn(r.Context()),
|
||||
|
|
@ -116,7 +116,7 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
|
|||
start := time.Now()
|
||||
resp, err := next.RoundTrip(r)
|
||||
if err == nil {
|
||||
exemplarObserve(
|
||||
observeWithExemplar(
|
||||
obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
|
||||
time.Since(start).Seconds(),
|
||||
rtOpts.getExemplarFn(r.Context()),
|
||||
|
|
|
|||
24
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
generated
vendored
|
|
@ -28,7 +28,9 @@ import (
|
|||
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
|
||||
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
|
||||
|
||||
func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) {
|
||||
// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
|
||||
// which falls back to [prometheus.Observer.Observe] if no labels are provided.
|
||||
func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
|
||||
if labels == nil {
|
||||
obs.Observe(val)
|
||||
return
|
||||
|
|
@ -36,7 +38,9 @@ func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]str
|
|||
obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
|
||||
}
|
||||
|
||||
func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) {
|
||||
// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
|
||||
// which falls back to [prometheus.Counter.Add] if no labels are provided.
|
||||
func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
|
||||
if labels == nil {
|
||||
obs.Add(val)
|
||||
return
|
||||
|
|
@ -91,7 +95,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
|
|||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
|
||||
exemplarObserve(
|
||||
observeWithExemplar(
|
||||
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
|
||||
time.Since(now).Seconds(),
|
||||
hOpts.getExemplarFn(r.Context()),
|
||||
|
|
@ -103,7 +107,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
|
|||
now := time.Now()
|
||||
next.ServeHTTP(w, r)
|
||||
|
||||
exemplarObserve(
|
||||
observeWithExemplar(
|
||||
obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
|
||||
time.Since(now).Seconds(),
|
||||
hOpts.getExemplarFn(r.Context()),
|
||||
|
|
@ -141,7 +145,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
|
|||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
|
||||
exemplarAdd(
|
||||
addWithExemplar(
|
||||
counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
|
||||
1,
|
||||
hOpts.getExemplarFn(r.Context()),
|
||||
|
|
@ -151,7 +155,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
|
|||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(w, r)
|
||||
exemplarAdd(
|
||||
addWithExemplar(
|
||||
counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
|
||||
1,
|
||||
hOpts.getExemplarFn(r.Context()),
|
||||
|
|
@ -192,7 +196,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
|
|||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
now := time.Now()
|
||||
d := newDelegator(w, func(status int) {
|
||||
exemplarObserve(
|
||||
observeWithExemplar(
|
||||
obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
|
||||
time.Since(now).Seconds(),
|
||||
hOpts.getExemplarFn(r.Context()),
|
||||
|
|
@ -233,7 +237,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
|
|||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
size := computeApproximateRequestSize(r)
|
||||
exemplarObserve(
|
||||
observeWithExemplar(
|
||||
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
|
||||
float64(size),
|
||||
hOpts.getExemplarFn(r.Context()),
|
||||
|
|
@ -244,7 +248,7 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
|
|||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
next.ServeHTTP(w, r)
|
||||
size := computeApproximateRequestSize(r)
|
||||
exemplarObserve(
|
||||
observeWithExemplar(
|
||||
obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
|
||||
float64(size),
|
||||
hOpts.getExemplarFn(r.Context()),
|
||||
|
|
@ -282,7 +286,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
|
|||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
d := newDelegator(w, nil)
|
||||
next.ServeHTTP(d, r)
|
||||
exemplarObserve(
|
||||
observeWithExemplar(
|
||||
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
|
||||
float64(d.Written()),
|
||||
hOpts.getExemplarFn(r.Context()),
|
||||
|
|
|
|||
|
|
@ -252,9 +252,12 @@ func (errs MultiError) MaybeUnwrap() error {
|
|||
}
|
||||
|
||||
// Registry registers Prometheus collectors, collects their metrics, and gathers
|
||||
// them into MetricFamilies for exposition. It implements both Registerer and
|
||||
// Gatherer. The zero value is not usable. Create instances with NewRegistry or
|
||||
// NewPedanticRegistry.
|
||||
// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
|
||||
// and Collector. The zero value is not usable. Create instances with
|
||||
// NewRegistry or NewPedanticRegistry.
|
||||
//
|
||||
// Registry implements Collector to allow it to be used for creating groups of
|
||||
// metrics. See the Grouping example for how this can be done.
|
||||
type Registry struct {
|
||||
mtx sync.RWMutex
|
||||
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
||||
|
|
@ -556,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
|||
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
}
|
||||
|
||||
// Describe implements Collector.
|
||||
func (r *Registry) Describe(ch chan<- *Desc) {
|
||||
r.mtx.RLock()
|
||||
defer r.mtx.RUnlock()
|
||||
|
||||
// Only report the checked Collectors; unchecked collectors don't report any
|
||||
// Desc.
|
||||
for _, c := range r.collectorsByID {
|
||||
c.Describe(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (r *Registry) Collect(ch chan<- Metric) {
|
||||
r.mtx.RLock()
|
||||
defer r.mtx.RUnlock()
|
||||
|
||||
for _, c := range r.collectorsByID {
|
||||
c.Collect(ch)
|
||||
}
|
||||
for _, c := range r.uncheckedCollectors {
|
||||
c.Collect(ch)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
|
||||
// Prometheus text format, and writes it to a temporary file. Upon success, the
|
||||
// temporary file is renamed to the provided filename.
|
||||
|
|
|
|||
|
|
@ -603,6 +603,7 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
|
|||
// WithLabelValues works as GetMetricWithLabelValues, but panics where
|
||||
// GetMetricWithLabelValues would have returned an error. Not returning an
|
||||
// error allows shortcuts like
|
||||
//
|
||||
// myVec.WithLabelValues("404", "GET").Observe(42.21)
|
||||
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
|
||||
s, err := v.GetMetricWithLabelValues(lvs...)
|
||||
|
|
@ -614,6 +615,7 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
|
|||
|
||||
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
|
||||
// returned an error. Not returning an error allows shortcuts like
|
||||
//
|
||||
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
|
||||
func (v *SummaryVec) With(labels Labels) Observer {
|
||||
s, err := v.GetMetricWith(labels)
|
||||
|
|
@ -701,6 +703,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
|
|||
//
|
||||
// quantiles maps ranks to quantile values. For example, a median latency of
|
||||
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
|
||||
//
|
||||
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
|
||||
//
|
||||
// NewConstSummary returns an error if the length of labelValues is not
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ type Timer struct {
|
|||
// NewTimer creates a new Timer. The provided Observer is used to observe a
|
||||
// duration in seconds. Timer is usually used to time a function call in the
|
||||
// following way:
|
||||
//
|
||||
// func TimeMe() {
|
||||
// timer := NewTimer(myHistogram)
|
||||
// defer timer.ObserveDuration()
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: metrics.proto
|
||||
// source: io/prometheus/client/metrics.proto
|
||||
|
||||
package io_prometheus_client
|
||||
|
||||
|
|
@ -24,11 +24,18 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
|||
type MetricType int32
|
||||
|
||||
const (
|
||||
// COUNTER must use the Metric field "counter".
|
||||
MetricType_COUNTER MetricType = 0
|
||||
// GAUGE must use the Metric field "gauge".
|
||||
MetricType_GAUGE MetricType = 1
|
||||
// SUMMARY must use the Metric field "summary".
|
||||
MetricType_SUMMARY MetricType = 2
|
||||
// UNTYPED must use the Metric field "untyped".
|
||||
MetricType_UNTYPED MetricType = 3
|
||||
// HISTOGRAM must use the Metric field "histogram".
|
||||
MetricType_HISTOGRAM MetricType = 4
|
||||
// GAUGE_HISTOGRAM must use the Metric field "histogram".
|
||||
MetricType_GAUGE_HISTOGRAM MetricType = 5
|
||||
)
|
||||
|
||||
var MetricType_name = map[int32]string{
|
||||
|
|
@ -37,6 +44,7 @@ var MetricType_name = map[int32]string{
|
|||
2: "SUMMARY",
|
||||
3: "UNTYPED",
|
||||
4: "HISTOGRAM",
|
||||
5: "GAUGE_HISTOGRAM",
|
||||
}
|
||||
|
||||
var MetricType_value = map[string]int32{
|
||||
|
|
@ -45,6 +53,7 @@ var MetricType_value = map[string]int32{
|
|||
"SUMMARY": 2,
|
||||
"UNTYPED": 3,
|
||||
"HISTOGRAM": 4,
|
||||
"GAUGE_HISTOGRAM": 5,
|
||||
}
|
||||
|
||||
func (x MetricType) Enum() *MetricType {
|
||||
|
|
@ -67,7 +76,7 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
|
||||
func (MetricType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{0}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{0}
|
||||
}
|
||||
|
||||
type LabelPair struct {
|
||||
|
|
@ -82,7 +91,7 @@ func (m *LabelPair) Reset() { *m = LabelPair{} }
|
|||
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
|
||||
func (*LabelPair) ProtoMessage() {}
|
||||
func (*LabelPair) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{0}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{0}
|
||||
}
|
||||
|
||||
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -128,7 +137,7 @@ func (m *Gauge) Reset() { *m = Gauge{} }
|
|||
func (m *Gauge) String() string { return proto.CompactTextString(m) }
|
||||
func (*Gauge) ProtoMessage() {}
|
||||
func (*Gauge) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{1}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{1}
|
||||
}
|
||||
|
||||
func (m *Gauge) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -168,7 +177,7 @@ func (m *Counter) Reset() { *m = Counter{} }
|
|||
func (m *Counter) String() string { return proto.CompactTextString(m) }
|
||||
func (*Counter) ProtoMessage() {}
|
||||
func (*Counter) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{2}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{2}
|
||||
}
|
||||
|
||||
func (m *Counter) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -215,7 +224,7 @@ func (m *Quantile) Reset() { *m = Quantile{} }
|
|||
func (m *Quantile) String() string { return proto.CompactTextString(m) }
|
||||
func (*Quantile) ProtoMessage() {}
|
||||
func (*Quantile) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{3}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{3}
|
||||
}
|
||||
|
||||
func (m *Quantile) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -263,7 +272,7 @@ func (m *Summary) Reset() { *m = Summary{} }
|
|||
func (m *Summary) String() string { return proto.CompactTextString(m) }
|
||||
func (*Summary) ProtoMessage() {}
|
||||
func (*Summary) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{4}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{4}
|
||||
}
|
||||
|
||||
func (m *Summary) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -316,7 +325,7 @@ func (m *Untyped) Reset() { *m = Untyped{} }
|
|||
func (m *Untyped) String() string { return proto.CompactTextString(m) }
|
||||
func (*Untyped) ProtoMessage() {}
|
||||
func (*Untyped) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{5}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{5}
|
||||
}
|
||||
|
||||
func (m *Untyped) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -346,8 +355,33 @@ func (m *Untyped) GetValue() float64 {
|
|||
|
||||
type Histogram struct {
|
||||
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
|
||||
SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"`
|
||||
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
|
||||
// Buckets for the conventional histogram.
|
||||
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
|
||||
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
|
||||
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
|
||||
// then each power of two is divided into 2^n logarithmic buckets.
|
||||
// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
|
||||
// In the future, more bucket schemas may be added using numbers < -4 or > 8.
|
||||
Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
|
||||
ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`
|
||||
ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`
|
||||
ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`
|
||||
// Negative buckets for the native histogram.
|
||||
NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
|
||||
// Use either "negative_delta" or "negative_count", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"`
|
||||
NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`
|
||||
// Positive buckets for the native histogram.
|
||||
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
|
||||
// Use either "positive_delta" or "positive_count", the former for
|
||||
// regular histograms with integer counts, the latter for float
|
||||
// histograms.
|
||||
PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"`
|
||||
PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
|
|
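The schema comment above encodes a formula: for schema n, each native-histogram bucket boundary is the previous boundary multiplied by 2^(2^-n), so schema 0 gives plain powers of two and larger schemas split each power of two into 2^n logarithmic buckets. The following minimal Go sketch (not part of the vendored code; names are illustrative) shows that boundary progression:

package main

import (
	"fmt"
	"math"
)

// boundaries returns the first count bucket boundaries above 1 for a given
// schema: each boundary is the previous one times 2^(2^-schema).
func boundaries(schema int32, count int) []float64 {
	factor := math.Pow(2, math.Pow(2, -float64(schema)))
	out := make([]float64, 0, count)
	b := 1.0
	for i := 0; i < count; i++ {
		b *= factor
		out = append(out, b)
	}
	return out
}

func main() {
	fmt.Println(boundaries(0, 4)) // [2 4 8 16]: schema 0 is plain powers of two
	fmt.Println(boundaries(3, 8)) // eight boundaries between 1 and 2 for schema 3
}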
@ -357,7 +391,7 @@ func (m *Histogram) Reset() { *m = Histogram{} }
|
|||
func (m *Histogram) String() string { return proto.CompactTextString(m) }
|
||||
func (*Histogram) ProtoMessage() {}
|
||||
func (*Histogram) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{6}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{6}
|
||||
}
|
||||
|
||||
func (m *Histogram) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -385,6 +419,13 @@ func (m *Histogram) GetSampleCount() uint64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetSampleCountFloat() float64 {
|
||||
if m != nil && m.SampleCountFloat != nil {
|
||||
return *m.SampleCountFloat
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetSampleSum() float64 {
|
||||
if m != nil && m.SampleSum != nil {
|
||||
return *m.SampleSum
|
||||
|
|
@ -399,8 +440,81 @@ func (m *Histogram) GetBucket() []*Bucket {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *Histogram) GetSchema() int32 {
|
||||
if m != nil && m.Schema != nil {
|
||||
return *m.Schema
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetZeroThreshold() float64 {
|
||||
if m != nil && m.ZeroThreshold != nil {
|
||||
return *m.ZeroThreshold
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetZeroCount() uint64 {
|
||||
if m != nil && m.ZeroCount != nil {
|
||||
return *m.ZeroCount
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetZeroCountFloat() float64 {
|
||||
if m != nil && m.ZeroCountFloat != nil {
|
||||
return *m.ZeroCountFloat
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Histogram) GetNegativeSpan() []*BucketSpan {
|
||||
if m != nil {
|
||||
return m.NegativeSpan
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Histogram) GetNegativeDelta() []int64 {
|
||||
if m != nil {
|
||||
return m.NegativeDelta
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Histogram) GetNegativeCount() []float64 {
|
||||
if m != nil {
|
||||
return m.NegativeCount
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Histogram) GetPositiveSpan() []*BucketSpan {
|
||||
if m != nil {
|
||||
return m.PositiveSpan
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Histogram) GetPositiveDelta() []int64 {
|
||||
if m != nil {
|
||||
return m.PositiveDelta
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Histogram) GetPositiveCount() []float64 {
|
||||
if m != nil {
|
||||
return m.PositiveCount
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A Bucket of a conventional histogram, each of which is treated as
|
||||
// an individual counter-like time series by Prometheus.
|
||||
type Bucket struct {
|
||||
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
|
||||
CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"`
|
||||
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
|
||||
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
|
|
@ -412,7 +526,7 @@ func (m *Bucket) Reset() { *m = Bucket{} }
|
|||
func (m *Bucket) String() string { return proto.CompactTextString(m) }
|
||||
func (*Bucket) ProtoMessage() {}
|
||||
func (*Bucket) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{7}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{7}
|
||||
}
|
||||
|
||||
func (m *Bucket) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -440,6 +554,13 @@ func (m *Bucket) GetCumulativeCount() uint64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (m *Bucket) GetCumulativeCountFloat() float64 {
|
||||
if m != nil && m.CumulativeCountFloat != nil {
|
||||
return *m.CumulativeCountFloat
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Bucket) GetUpperBound() float64 {
|
||||
if m != nil && m.UpperBound != nil {
|
||||
return *m.UpperBound
|
||||
|
|
@ -454,6 +575,59 @@ func (m *Bucket) GetExemplar() *Exemplar {
|
|||
return nil
|
||||
}
|
||||
|
||||
// A BucketSpan defines a number of consecutive buckets in a native
|
||||
// histogram with their offset. Logically, it would be more
|
||||
// straightforward to include the bucket counts in the Span. However,
|
||||
// the protobuf representation is more compact in the way the data is
|
||||
// structured here (with all the buckets in a single array separate
|
||||
// from the Spans).
|
||||
type BucketSpan struct {
|
||||
Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"`
|
||||
Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *BucketSpan) Reset() { *m = BucketSpan{} }
|
||||
func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
|
||||
func (*BucketSpan) ProtoMessage() {}
|
||||
func (*BucketSpan) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{8}
|
||||
}
|
||||
|
||||
func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_BucketSpan.Unmarshal(m, b)
|
||||
}
|
||||
func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *BucketSpan) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_BucketSpan.Merge(m, src)
|
||||
}
|
||||
func (m *BucketSpan) XXX_Size() int {
|
||||
return xxx_messageInfo_BucketSpan.Size(m)
|
||||
}
|
||||
func (m *BucketSpan) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_BucketSpan.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
|
||||
|
||||
func (m *BucketSpan) GetOffset() int32 {
|
||||
if m != nil && m.Offset != nil {
|
||||
return *m.Offset
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *BucketSpan) GetLength() uint32 {
|
||||
if m != nil && m.Length != nil {
|
||||
return *m.Length
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
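The NegativeSpan/PositiveSpan and NegativeDelta/PositiveDelta fields above describe native-histogram buckets compactly: each span gives an offset and a run length, and the delta arrays carry the difference between consecutive bucket counts. A minimal, self-contained Go sketch of how a reader could expand that encoding into absolute counts per bucket index (illustrative only; this decoder is not part of the generated model):

package main

import "fmt"

// span mirrors the Offset/Length pair of a BucketSpan.
type span struct {
	offset int32  // buckets skipped since the end of the previous span
	length uint32 // number of consecutive buckets in this span
}

// expand turns spans plus delta-encoded counts into a map from bucket index
// to absolute count, the way PositiveSpan/PositiveDelta are meant to be read.
func expand(spans []span, deltas []int64) map[int32]int64 {
	out := map[int32]int64{}
	var idx int32
	var count int64
	i := 0
	for _, s := range spans {
		idx += s.offset
		for j := uint32(0); j < s.length; j++ {
			count += deltas[i]
			out[idx] = count
			i++
			idx++
		}
	}
	return out
}

func main() {
	spans := []span{{offset: 0, length: 2}, {offset: 3, length: 1}}
	deltas := []int64{5, -2, 4} // absolute counts 5, 3, 7
	fmt.Println(expand(spans, deltas)) // map[0:5 1:3 5:7]
}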
||||
type Exemplar struct {
|
||||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
|
||||
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
|
||||
|
|
@ -467,7 +641,7 @@ func (m *Exemplar) Reset() { *m = Exemplar{} }
|
|||
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
|
||||
func (*Exemplar) ProtoMessage() {}
|
||||
func (*Exemplar) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{8}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{9}
|
||||
}
|
||||
|
||||
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -526,7 +700,7 @@ func (m *Metric) Reset() { *m = Metric{} }
|
|||
func (m *Metric) String() string { return proto.CompactTextString(m) }
|
||||
func (*Metric) ProtoMessage() {}
|
||||
func (*Metric) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{9}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{10}
|
||||
}
|
||||
|
||||
func (m *Metric) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -610,7 +784,7 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} }
|
|||
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
|
||||
func (*MetricFamily) ProtoMessage() {}
|
||||
func (*MetricFamily) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{10}
|
||||
return fileDescriptor_d1e5ddb18987a258, []int{11}
|
||||
}
|
||||
|
||||
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
|
||||
|
|
@ -669,55 +843,72 @@ func init() {
|
|||
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
|
||||
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
|
||||
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
|
||||
proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
|
||||
proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
|
||||
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
|
||||
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
|
||||
|
||||
var fileDescriptor_6039342a2ba47b72 = []byte{
|
||||
// 665 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
|
||||
0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
|
||||
0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
|
||||
0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
|
||||
0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
|
||||
0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
|
||||
0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
|
||||
0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
|
||||
0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
|
||||
0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
|
||||
0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
|
||||
0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
|
||||
0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
|
||||
0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
|
||||
0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
|
||||
0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
|
||||
0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
|
||||
0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
|
||||
0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
|
||||
0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
|
||||
0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
|
||||
0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
|
||||
0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
|
||||
0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
|
||||
0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
|
||||
0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
|
||||
0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
|
||||
0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
|
||||
0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
|
||||
0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
|
||||
0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
|
||||
0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
|
||||
0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
|
||||
0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
|
||||
0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
|
||||
0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
|
||||
0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
|
||||
0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
|
||||
0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
|
||||
0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
|
||||
0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
|
||||
0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
|
||||
func init() {
|
||||
proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
|
||||
}
|
||||
|
||||
var fileDescriptor_d1e5ddb18987a258 = []byte{
|
||||
// 896 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
|
||||
0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48,
|
||||
0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92,
|
||||
0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0,
|
||||
0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b,
|
||||
0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3,
|
||||
0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90,
|
||||
0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25,
|
||||
0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb,
|
||||
0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19,
|
||||
0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba,
|
||||
0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b,
|
||||
0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c,
|
||||
0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0,
|
||||
0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5,
|
||||
0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd,
|
||||
0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d,
|
||||
0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b,
|
||||
0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b,
|
||||
0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f,
|
||||
0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b,
|
||||
0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8,
|
||||
0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e,
|
||||
0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79,
|
||||
0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29,
|
||||
0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48,
|
||||
0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca,
|
||||
0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9,
|
||||
0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5,
|
||||
0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe,
|
||||
0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55,
|
||||
0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e,
|
||||
0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83,
|
||||
0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65,
|
||||
0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a,
|
||||
0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8,
|
||||
0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf,
|
||||
0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1,
|
||||
0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97,
|
||||
0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc,
|
||||
0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7,
|
||||
0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b,
|
||||
0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58,
|
||||
0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b,
|
||||
0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8,
|
||||
0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f,
|
||||
0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67,
|
||||
0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28,
|
||||
0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27,
|
||||
0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41,
|
||||
0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f,
|
||||
0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f,
|
||||
0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac,
|
||||
0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a,
|
||||
0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab,
|
||||
0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
package instance
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/scaleway/scaleway-sdk-go/internal/async"
|
||||
|
|
@ -54,3 +56,68 @@ func (s *API) WaitForImage(req *WaitForImageRequest, opts ...scw.RequestOption)
|
|||
}
|
||||
return image.(*Image), nil
|
||||
}
|
||||
|
||||
type UpdateImageRequest struct {
|
||||
Zone scw.Zone `json:"zone"`
|
||||
ImageID string `json:"id"`
|
||||
Name *string `json:"name,omitempty"`
|
||||
Arch Arch `json:"arch,omitempty"`
|
||||
CreationDate *time.Time `json:"creation_date"`
|
||||
ModificationDate *time.Time `json:"modification_date"`
|
||||
ExtraVolumes map[string]*VolumeTemplate `json:"extra_volumes"`
|
||||
FromServer string `json:"from_server,omitempty"`
|
||||
Organization string `json:"organization"`
|
||||
Public bool `json:"public"`
|
||||
RootVolume *VolumeSummary `json:"root_volume,omitempty"`
|
||||
State ImageState `json:"state"`
|
||||
Project string `json:"project"`
|
||||
Tags *[]string `json:"tags,omitempty"`
|
||||
}
|
||||
|
||||
type UpdateImageResponse struct {
|
||||
Image *Image
|
||||
}
|
||||
|
||||
func (s *API) UpdateImage(req *UpdateImageRequest, opts ...scw.RequestOption) (*UpdateImageResponse, error) {
|
||||
// This function is the equivalent of the private setImage function, which is not usable here because its JSON tags and
|
||||
// types are not compatible with what the compute API expects.
|
||||
|
||||
if req.Project == "" {
|
||||
defaultProject, _ := s.client.GetDefaultProjectID()
|
||||
req.Project = defaultProject
|
||||
}
|
||||
if req.Organization == "" {
|
||||
defaultOrganization, _ := s.client.GetDefaultOrganizationID()
|
||||
req.Organization = defaultOrganization
|
||||
}
|
||||
if req.Zone == "" {
|
||||
defaultZone, _ := s.client.GetDefaultZone()
|
||||
req.Zone = defaultZone
|
||||
}
|
||||
if fmt.Sprint(req.Zone) == "" {
|
||||
return nil, errors.New("field Zone cannot be empty in request")
|
||||
}
|
||||
if fmt.Sprint(req.ImageID) == "" {
|
||||
return nil, errors.New("field ID cannot be empty in request")
|
||||
}
|
||||
|
||||
scwReq := &scw.ScalewayRequest{
|
||||
Method: "PUT",
|
||||
Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ImageID) + "",
|
||||
Headers: http.Header{},
|
||||
}
|
||||
|
||||
err := scwReq.SetBody(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var resp UpdateImageResponse
|
||||
|
||||
err = s.client.Do(scwReq, &resp, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &resp, nil
|
||||
}
|
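The UpdateImage helper added above issues a PUT on the instance images endpoint, filling in the default project, organization and zone when they are empty. A hedged usage sketch follows; the client options, image ID and new name are placeholders, not values from this diff:

package main

import (
	"fmt"
	"log"

	"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// Placeholder credentials and zone; a real caller would load a profile.
	client, err := scw.NewClient(
		scw.WithAuth("ACCESS_KEY", "SECRET_KEY"),
		scw.WithDefaultZone(scw.ZoneFrPar1),
	)
	if err != nil {
		log.Fatal(err)
	}

	api := instance.NewAPI(client)
	name := "renamed-image"
	resp, err := api.UpdateImage(&instance.UpdateImageRequest{
		ImageID: "11111111-1111-1111-1111-111111111111", // placeholder UUID
		Name:    &name,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Image.Name)
}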
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -3,7 +3,9 @@ package instance
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/scaleway/scaleway-sdk-go/internal/async"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/scaleway/scaleway-sdk-go/internal/errors"
|
||||
"github.com/scaleway/scaleway-sdk-go/scw"
|
||||
|
|
@ -330,3 +332,100 @@ func (v *NullableStringValue) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
return json.Marshal(v.Value)
|
||||
}
|
||||
|
||||
// WaitForPrivateNICRequest is used by WaitForPrivateNIC method.
|
||||
type WaitForPrivateNICRequest struct {
|
||||
ServerID string
|
||||
PrivateNicID string
|
||||
Zone scw.Zone
|
||||
Timeout *time.Duration
|
||||
RetryInterval *time.Duration
|
||||
}
|
||||
|
||||
// WaitForPrivateNIC waits for the private NIC to reach a "terminal state" before returning.
|
||||
// It can be used, for example, to wait for a private network to be attached.
|
||||
func (s *API) WaitForPrivateNIC(req *WaitForPrivateNICRequest, opts ...scw.RequestOption) (*PrivateNIC, error) {
|
||||
timeout := defaultTimeout
|
||||
if req.Timeout != nil {
|
||||
timeout = *req.Timeout
|
||||
}
|
||||
retryInterval := defaultRetryInterval
|
||||
if req.RetryInterval != nil {
|
||||
retryInterval = *req.RetryInterval
|
||||
}
|
||||
|
||||
terminalStatus := map[PrivateNICState]struct{}{
|
||||
PrivateNICStateAvailable: {},
|
||||
PrivateNICStateSyncingError: {},
|
||||
}
|
||||
|
||||
pn, err := async.WaitSync(&async.WaitSyncConfig{
|
||||
Get: func() (interface{}, bool, error) {
|
||||
res, err := s.GetPrivateNIC(&GetPrivateNICRequest{
|
||||
ServerID: req.ServerID,
|
||||
Zone: req.Zone,
|
||||
PrivateNicID: req.PrivateNicID,
|
||||
}, opts...)
|
||||
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
_, isTerminal := terminalStatus[res.PrivateNic.State]
|
||||
|
||||
return res.PrivateNic, isTerminal, err
|
||||
},
|
||||
Timeout: timeout,
|
||||
IntervalStrategy: async.LinearIntervalStrategy(retryInterval),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "waiting for server failed")
|
||||
}
|
||||
return pn.(*PrivateNIC), nil
|
||||
}
|
||||
|
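WaitForPrivateNIC above polls GetPrivateNIC until the NIC reaches one of the terminal states (available or syncing_error), retrying on a linear interval. A hedged sketch of a caller; the API client is assumed to be built elsewhere and the IDs are placeholders:

package example

import (
	"fmt"
	"log"
	"time"

	"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// WaitForNICExample blocks until the private NIC is in a terminal state or
// the timeout elapses. api is assumed to come from an authenticated client.
func WaitForNICExample(api *instance.API) {
	timeout := 5 * time.Minute
	nic, err := api.WaitForPrivateNIC(&instance.WaitForPrivateNICRequest{
		ServerID:     "11111111-1111-1111-1111-111111111111", // placeholder
		PrivateNicID: "22222222-2222-2222-2222-222222222222", // placeholder
		Zone:         scw.ZoneFrPar1,
		Timeout:      &timeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(nic.State, nic.MacAddress)
}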
||||
// WaitForMACAddressRequest is used by WaitForMACAddress method.
|
||||
type WaitForMACAddressRequest struct {
|
||||
ServerID string
|
||||
PrivateNicID string
|
||||
Zone scw.Zone
|
||||
Timeout *time.Duration
|
||||
RetryInterval *time.Duration
|
||||
}
|
||||
|
||||
// WaitForMACAddress waits for the MAC address to be assigned on the instance before returning.
|
||||
// It can be used, for example, to wait for a private network to be attached.
|
||||
func (s *API) WaitForMACAddress(req *WaitForMACAddressRequest, opts ...scw.RequestOption) (*PrivateNIC, error) {
|
||||
timeout := defaultTimeout
|
||||
if req.Timeout != nil {
|
||||
timeout = *req.Timeout
|
||||
}
|
||||
retryInterval := defaultRetryInterval
|
||||
if req.RetryInterval != nil {
|
||||
retryInterval = *req.RetryInterval
|
||||
}
|
||||
|
||||
pn, err := async.WaitSync(&async.WaitSyncConfig{
|
||||
Get: func() (interface{}, bool, error) {
|
||||
res, err := s.GetPrivateNIC(&GetPrivateNICRequest{
|
||||
ServerID: req.ServerID,
|
||||
Zone: req.Zone,
|
||||
PrivateNicID: req.PrivateNicID,
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if len(res.PrivateNic.MacAddress) > 0 {
|
||||
return res.PrivateNic, true, err
|
||||
}
|
||||
|
||||
return res.PrivateNic, false, err
|
||||
},
|
||||
Timeout: timeout,
|
||||
IntervalStrategy: async.LinearIntervalStrategy(retryInterval),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "waiting for server failed")
|
||||
}
|
||||
return pn.(*PrivateNIC), nil
|
||||
}
|
||||
|
|
|
|||
6
vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/security_group_utils.go
generated
vendored
|
|
@ -20,6 +20,7 @@ type UpdateSecurityGroupRequest struct {
|
|||
OrganizationDefault *bool `json:"organization_default,omitempty"`
|
||||
ProjectDefault *bool `json:"project_default,omitempty"`
|
||||
EnableDefaultSecurity *bool `json:"enable_default_security,omitempty"`
|
||||
Tags *[]string `json:"tags,omitempty"`
|
||||
}
|
||||
|
||||
type UpdateSecurityGroupResponse struct {
|
||||
|
|
@ -86,7 +87,7 @@ func (s *API) UpdateSecurityGroup(req *UpdateSecurityGroupRequest, opts ...scw.R
|
|||
setRequest.Stateful = *req.Stateful
|
||||
}
|
||||
if req.OrganizationDefault != nil {
|
||||
setRequest.OrganizationDefault = *req.OrganizationDefault
|
||||
setRequest.OrganizationDefault = req.OrganizationDefault
|
||||
}
|
||||
if req.ProjectDefault != nil {
|
||||
setRequest.ProjectDefault = *req.ProjectDefault
|
||||
|
|
@ -94,6 +95,9 @@ func (s *API) UpdateSecurityGroup(req *UpdateSecurityGroupRequest, opts ...scw.R
|
|||
if req.EnableDefaultSecurity != nil {
|
||||
setRequest.EnableDefaultSecurity = *req.EnableDefaultSecurity
|
||||
}
|
||||
if req.Tags != nil {
|
||||
setRequest.Tags = req.Tags
|
||||
}
|
||||
|
||||
setRes, err := s.setSecurityGroup(setRequest, opts...)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -60,6 +60,7 @@ type UpdateSnapshotRequest struct {
|
|||
Zone scw.Zone
|
||||
SnapshotID string
|
||||
Name *string `json:"name,omitempty"`
|
||||
Tags *[]string `json:"tags,omitempty"`
|
||||
}
|
||||
|
||||
type UpdateSnapshotResponse struct {
|
||||
|
|
@ -106,6 +107,10 @@ func (s *API) UpdateSnapshot(req *UpdateSnapshotRequest, opts ...scw.RequestOpti
|
|||
setRequest.Name = *req.Name
|
||||
}
|
||||
|
||||
if req.Tags != nil {
|
||||
setRequest.Tags = req.Tags
|
||||
}
|
||||
|
||||
setRes, err := s.setSnapshot(setRequest, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
|||
33
vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go
generated
vendored
|
|
@ -55,14 +55,6 @@ type GetImageResponse struct {
|
|||
Image *Image `json:"image"`
|
||||
}
|
||||
|
||||
type GetServiceInfoResponse struct {
|
||||
API string `json:"api"`
|
||||
|
||||
Description string `json:"description"`
|
||||
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
type GetVersionResponse struct {
|
||||
Version *Version `json:"version"`
|
||||
}
|
||||
|
|
@ -86,6 +78,8 @@ type Image struct {
|
|||
// ValidUntil: expiration date of this image
|
||||
ValidUntil *time.Time `json:"valid_until"`
|
||||
// Label: label of this image
|
||||
//
|
||||
// Typically an identifier for a distribution (ex. "ubuntu_focal").
|
||||
Label string `json:"label"`
|
||||
// Versions: list of versions of this image
|
||||
Versions []*Version `json:"versions"`
|
||||
|
|
@ -110,6 +104,8 @@ type ListVersionsResponse struct {
|
|||
// LocalImage: local image
|
||||
type LocalImage struct {
|
||||
// ID: UUID of this local image
|
||||
//
|
||||
// Version you will typically use to define an image in an API call.
|
||||
ID string `json:"id"`
|
||||
// CompatibleCommercialTypes: list of all commercial types that are compatible with this local image
|
||||
CompatibleCommercialTypes []string `json:"compatible_commercial_types"`
|
||||
|
|
@ -141,27 +137,6 @@ type Version struct {
|
|||
|
||||
// Service API
|
||||
|
||||
type GetServiceInfoRequest struct {
|
||||
}
|
||||
|
||||
func (s *API) GetServiceInfo(req *GetServiceInfoRequest, opts ...scw.RequestOption) (*GetServiceInfoResponse, error) {
|
||||
var err error
|
||||
|
||||
scwReq := &scw.ScalewayRequest{
|
||||
Method: "GET",
|
||||
Path: "/marketplace/v1",
|
||||
Headers: http.Header{},
|
||||
}
|
||||
|
||||
var resp GetServiceInfoResponse
|
||||
|
||||
err = s.client.Do(scwReq, &resp, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
type ListImagesRequest struct {
|
||||
// PerPage: a positive integer lower or equal to 100 to select the number of items to display
|
||||
PerPage *uint32 `json:"-"`
|
||||
|
|
|
|||
17
vendor/github.com/scaleway/scaleway-sdk-go/internal/generic/sort.go
generated
vendored
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
package generic
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// SortSliceByField sorts the given slice of structs by passing the specified field of each element to the given compare function.
|
||||
// The given slice must be a slice of pointers to structs.
|
||||
func SortSliceByField(list interface{}, field string, compare func(interface{}, interface{}) bool) {
|
||||
listValue := reflect.ValueOf(list)
|
||||
sort.SliceStable(list, func(i, j int) bool {
|
||||
field1 := listValue.Index(i).Elem().FieldByName(field).Interface()
|
||||
field2 := listValue.Index(j).Elem().FieldByName(field).Interface()
|
||||
return compare(field1, field2)
|
||||
})
|
||||
}
|
||||
|
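SortSliceByField sorts a slice of struct pointers in place by handing the named field of each element to the caller's compare function. Because the helper lives in an internal package, outside code cannot import it; the self-contained sketch below copies the same reflection pattern to show how it behaves:

package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sortSliceByField is a local copy of the internal helper shown above:
// list must be a slice of struct pointers, and compare receives the named
// field of two elements.
func sortSliceByField(list interface{}, field string, compare func(interface{}, interface{}) bool) {
	listValue := reflect.ValueOf(list)
	sort.SliceStable(list, func(i, j int) bool {
		f1 := listValue.Index(i).Elem().FieldByName(field).Interface()
		f2 := listValue.Index(j).Elem().FieldByName(field).Interface()
		return compare(f1, f2)
	})
}

type server struct {
	Zone string
	Name string
}

func main() {
	servers := []*server{
		{Zone: "nl-ams-1", Name: "b"},
		{Zone: "fr-par-1", Name: "a"},
	}
	sortSliceByField(servers, "Zone", func(a, b interface{}) bool {
		return a.(string) < b.(string)
	})
	fmt.Println(servers[0].Zone, servers[1].Zone) // fr-par-1 nl-ams-1
}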
|
@ -2,6 +2,7 @@ package parameter
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"time"
|
||||
|
|
@ -21,6 +22,8 @@ func AddToQuery(query url.Values, key string, value interface{}) {
|
|||
|
||||
elemType := elemValue.Type()
|
||||
switch {
|
||||
case elemType == reflect.TypeOf(net.IP{}):
|
||||
query.Add(key, value.(*net.IP).String())
|
||||
case elemType.Kind() == reflect.Slice:
|
||||
for i := 0; i < elemValue.Len(); i++ {
|
||||
query.Add(key, fmt.Sprint(elemValue.Index(i).Interface()))
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ package scw
|
|||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
|
|
@ -10,11 +11,14 @@ import (
|
|||
"net/http/httputil"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/scaleway/scaleway-sdk-go/internal/auth"
|
||||
"github.com/scaleway/scaleway-sdk-go/internal/errors"
|
||||
"github.com/scaleway/scaleway-sdk-go/internal/generic"
|
||||
"github.com/scaleway/scaleway-sdk-go/logger"
|
||||
)
|
||||
|
||||
|
|
@ -164,6 +168,13 @@ func (c *Client) Do(req *ScalewayRequest, res interface{}, opts ...RequestOption
|
|||
req.auth = c.auth
|
||||
}
|
||||
|
||||
if req.zones != nil {
|
||||
return c.doListZones(req, res, req.zones)
|
||||
}
|
||||
if req.regions != nil {
|
||||
return c.doListRegions(req, res, req.regions)
|
||||
}
|
||||
|
||||
if req.allPages {
|
||||
return c.doListAll(req, res)
|
||||
}
|
||||
|
|
@ -340,6 +351,182 @@ func (c *Client) doListAll(req *ScalewayRequest, res interface{}) (err error) {
|
|||
return errors.New("%T does not support pagination", res)
|
||||
}
|
||||
|
||||
// doListLocalities collects all localities using multiple list requests and aggregates all results into a lister response
|
||||
// results are sorted by locality
|
||||
func (c *Client) doListLocalities(req *ScalewayRequest, res lister, localities []string) (err error) {
|
||||
path := req.Path
|
||||
if !strings.Contains(path, "%locality%") {
|
||||
return fmt.Errorf("request is not a valid locality request")
|
||||
}
|
||||
// Requests are parallelized
|
||||
responseMutex := sync.Mutex{}
|
||||
requestGroup := sync.WaitGroup{}
|
||||
errChan := make(chan error, len(localities))
|
||||
|
||||
requestGroup.Add(len(localities))
|
||||
for _, locality := range localities {
|
||||
go func(locality string) {
|
||||
defer requestGroup.Done()
|
||||
// Request is cloned as doListAll will change header
|
||||
// We remove zones as it would recurse in the same function
|
||||
req := req.clone()
|
||||
req.zones = []Zone(nil)
|
||||
req.Path = strings.ReplaceAll(path, "%locality%", locality)
|
||||
|
||||
// We create a new response that we append to main response
|
||||
zoneResponse := newVariableFromType(res)
|
||||
err := c.Do(req, zoneResponse)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
responseMutex.Lock()
|
||||
_, err = res.UnsafeAppend(zoneResponse)
|
||||
responseMutex.Unlock()
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
}(locality)
|
||||
}
|
||||
requestGroup.Wait()
|
||||
|
||||
L: // We gather potential errors and return them all together
|
||||
for {
|
||||
select {
|
||||
case newErr := <-errChan:
|
||||
err = errors.Wrap(err, newErr.Error())
|
||||
default:
|
||||
break L
|
||||
}
|
||||
}
|
||||
close(errChan)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// doListZones collects all zones using multiple list requests and aggregates all results into a single response.
|
||||
// result is sorted by zone
|
||||
func (c *Client) doListZones(req *ScalewayRequest, res interface{}, zones []Zone) (err error) {
|
||||
if response, isLister := res.(lister); isLister {
|
||||
// Prepare request with %zone% that can be replaced with actual zone
|
||||
for _, zone := range AllZones {
|
||||
if strings.Contains(req.Path, string(zone)) {
|
||||
req.Path = strings.ReplaceAll(req.Path, string(zone), "%locality%")
|
||||
break
|
||||
}
|
||||
}
|
||||
if !strings.Contains(req.Path, "%locality%") {
|
||||
return fmt.Errorf("request is not a valid zoned request")
|
||||
}
|
||||
localities := make([]string, 0, len(zones))
|
||||
for _, zone := range zones {
|
||||
localities = append(localities, string(zone))
|
||||
}
|
||||
|
||||
err := c.doListLocalities(req, response, localities)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list localities: %w", err)
|
||||
}
|
||||
|
||||
sortResponseByZones(res, zones)
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New("%T does not support pagination", res)
|
||||
}
|
||||
|
||||
// doListRegions collects all regions using multiple list requests and aggregates all results into a single response.
|
||||
// result is sorted by region
|
||||
func (c *Client) doListRegions(req *ScalewayRequest, res interface{}, regions []Region) (err error) {
|
||||
if response, isLister := res.(lister); isLister {
|
||||
// Prepare request with %locality% that can be replaced with actual region
|
||||
for _, region := range AllRegions {
|
||||
if strings.Contains(req.Path, string(region)) {
|
||||
req.Path = strings.ReplaceAll(req.Path, string(region), "%locality%")
|
||||
break
|
||||
}
|
||||
}
|
||||
if !strings.Contains(req.Path, "%locality%") {
|
||||
return fmt.Errorf("request is not a valid zoned request")
|
||||
}
|
||||
localities := make([]string, 0, len(regions))
|
||||
for _, region := range regions {
|
||||
localities = append(localities, string(region))
|
||||
}
|
||||
|
||||
err := c.doListLocalities(req, response, localities)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list localities: %w", err)
|
||||
}
|
||||
|
||||
sortResponseByRegions(res, regions)
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.New("%T does not support pagination", res)
|
||||
}
|
||||
|
||||
// sortSliceByZones sorts a slice of struct using a Zone field that should exist
|
||||
func sortSliceByZones(list interface{}, zones []Zone) {
|
||||
zoneMap := map[Zone]int{}
|
||||
for i, zone := range zones {
|
||||
zoneMap[zone] = i
|
||||
}
|
||||
generic.SortSliceByField(list, "Zone", func(i interface{}, i2 interface{}) bool {
|
||||
return zoneMap[i.(Zone)] < zoneMap[i2.(Zone)]
|
||||
})
|
||||
}
|
||||
|
||||
// sortSliceByRegions sorts a slice of struct using a Region field that should exist
|
||||
func sortSliceByRegions(list interface{}, regions []Region) {
|
||||
regionMap := map[Region]int{}
|
||||
for i, region := range regions {
|
||||
regionMap[region] = i
|
||||
}
|
||||
generic.SortSliceByField(list, "Region", func(i interface{}, i2 interface{}) bool {
|
||||
return regionMap[i.(Region)] < regionMap[i2.(Region)]
|
||||
})
|
||||
}
|
||||
|
||||
// sortResponseByZones finds the first field that is a slice in the response struct and sorts it by zone
|
||||
func sortResponseByZones(res interface{}, zones []Zone) {
|
||||
// res may be ListServersResponse
|
||||
//
|
||||
// type ListServersResponse struct {
|
||||
// TotalCount uint32 `json:"total_count"`
|
||||
// Servers []*Server `json:"servers"`
|
||||
// }
|
||||
// We iterate over fields searching for the slice one to sort it
|
||||
resType := reflect.TypeOf(res).Elem()
|
||||
fields := reflect.VisibleFields(resType)
|
||||
for _, field := range fields {
|
||||
if field.Type.Kind() == reflect.Slice {
|
||||
sortSliceByZones(reflect.ValueOf(res).Elem().FieldByName(field.Name).Interface(), zones)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sortResponseByRegions finds the first field that is a slice in the response struct and sorts it by region
|
||||
func sortResponseByRegions(res interface{}, regions []Region) {
|
||||
// res may be ListServersResponse
|
||||
//
|
||||
// type ListServersResponse struct {
|
||||
// TotalCount uint32 `json:"total_count"`
|
||||
// Servers []*Server `json:"servers"`
|
||||
// }
|
||||
// We iterate over fields searching for the slice one to sort it
|
||||
resType := reflect.TypeOf(res).Elem()
|
||||
fields := reflect.VisibleFields(resType)
|
||||
for _, field := range fields {
|
||||
if field.Type.Kind() == reflect.Slice {
|
||||
sortSliceByRegions(reflect.ValueOf(res).Elem().FieldByName(field.Name).Interface(), regions)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newVariableFromType returns a variable set to the zero value of the given type
|
||||
func newVariableFromType(t interface{}) interface{} {
|
||||
// reflect.New always creates a pointer, that's why we use reflect.Indirect before
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package scw
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
|
@ -338,3 +339,81 @@ func splitFloatString(input string) (units int64, nanos int32, err error) {
|
|||
|
||||
return units, nanos, nil
|
||||
}
|
||||
|
||||
// JSONObject represents any JSON object. See struct.proto.
|
||||
// It will be marshaled into a json string.
|
||||
// This type can be used just like any other map.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// values := scw.JSONObject{
|
||||
// "Foo": "Bar",
|
||||
// }
|
||||
// values["Baz"] = "Qux"
|
||||
type JSONObject map[string]interface{}
|
||||
|
||||
// EscapeMode is the mode that should be used for escaping a value
|
||||
type EscapeMode uint
|
||||
|
||||
// The modes for escaping a value before it is marshaled, and unmarshalled.
|
||||
const (
|
||||
NoEscape EscapeMode = iota
|
||||
Base64Escape
|
||||
QuotedEscape
|
||||
)
|
||||
|
||||
// DecodeJSONObject will attempt to decode the string input as a JSONObject.
|
||||
// Optionally base64-decodes the value before JSON unmarshalling.
|
||||
//
|
||||
// Will panic if the escape mode is unknown.
|
||||
func DecodeJSONObject(v string, escape EscapeMode) (JSONObject, error) {
|
||||
var b []byte
|
||||
var err error
|
||||
|
||||
switch escape {
|
||||
case NoEscape:
|
||||
b = []byte(v)
|
||||
case Base64Escape:
|
||||
b, err = base64.StdEncoding.DecodeString(v)
|
||||
case QuotedEscape:
|
||||
var u string
|
||||
u, err = strconv.Unquote(v)
|
||||
b = []byte(u)
|
||||
default:
|
||||
panic(fmt.Sprintf("DecodeJSONObject called with unknown EscapeMode, %v", escape))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := JSONObject{}
|
||||
err = json.Unmarshal(b, &m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// EncodeJSONObject marshals the value into a JSON string, and optionally base64
|
||||
// encodes the string before returning it.
|
||||
//
|
||||
// Will panic if the escape mode is unknown.
|
||||
func EncodeJSONObject(v JSONObject, escape EscapeMode) (string, error) {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
switch escape {
|
||||
case NoEscape:
|
||||
return string(b), nil
|
||||
case Base64Escape:
|
||||
return base64.StdEncoding.EncodeToString(b), nil
|
||||
case QuotedEscape:
|
||||
return strconv.Quote(string(b)), nil
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("EncodeJSONObject called with unknown EscapeMode, %v", escape))
|
||||
}
|
||||
|
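EncodeJSONObject and DecodeJSONObject round-trip a JSONObject through one of the three escape modes (none, base64, or quoted string). A short hedged usage sketch:

package main

import (
	"fmt"
	"log"

	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	obj := scw.JSONObject{"foo": "bar", "count": 3}

	// Marshal to JSON and base64-encode it, then reverse both steps.
	encoded, err := scw.EncodeJSONObject(obj, scw.Base64Escape)
	if err != nil {
		log.Fatal(err)
	}
	decoded, err := scw.DecodeJSONObject(encoded, scw.Base64Escape)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(encoded)        // base64 payload
	fmt.Println(decoded["foo"]) // bar
}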
|
|
|||
|
|
@ -21,6 +21,7 @@ const (
|
|||
ScwDefaultProjectIDEnv = "SCW_DEFAULT_PROJECT_ID"
|
||||
ScwDefaultRegionEnv = "SCW_DEFAULT_REGION"
|
||||
ScwDefaultZoneEnv = "SCW_DEFAULT_ZONE"
|
||||
ScwEnableBeta = "SCW_ENABLE_BETA"
|
||||
DebugEnv = logger.DebugEnv
|
||||
|
||||
// All deprecated (cli&terraform)
|
||||
|
|
|
|||
|
|
@ -346,7 +346,7 @@ func (e *QuotasExceededError) IsScwSdkError() {}
|
|||
func (e *QuotasExceededError) Error() string {
|
||||
invalidArgs := make([]string, len(e.Details))
|
||||
for i, d := range e.Details {
|
||||
invalidArgs[i] = fmt.Sprintf("%s has reached its quota (%d/%d)", d.Resource, d.Current, d.Current)
|
||||
invalidArgs[i] = fmt.Sprintf("%s has reached its quota (%d/%d)", d.Resource, d.Current, d.Quota)
|
||||
}
|
||||
|
||||
return "scaleway-sdk-go: quota exceeded(s): " + strings.Join(invalidArgs, "; ")
|
||||
|
|
@ -525,7 +525,7 @@ func (r DeniedAuthenticationError) IsScwSdkError() {}
|
|||
|
||||
// PreconditionFailedError implements the SdkError interface
|
||||
type PreconditionFailedError struct {
|
||||
Precondition string `json:"method"`
|
||||
Precondition string `json:"precondition"`
|
||||
HelpMessage string `json:"help_message"`
|
||||
|
||||
RawBody json.RawMessage `json:"-"`
|
||||
|
|
|
|||
|
|
@ -29,6 +29,8 @@ const (
|
|||
ZoneNlAms2 = Zone("nl-ams-2")
|
||||
// ZonePlWaw1 represents the pl-waw-1 zone
|
||||
ZonePlWaw1 = Zone("pl-waw-1")
|
||||
// ZonePlWaw2 represents the pl-waw-2 zone
|
||||
ZonePlWaw2 = Zone("pl-waw-2")
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -40,6 +42,7 @@ var (
|
|||
ZoneNlAms1,
|
||||
ZoneNlAms2,
|
||||
ZonePlWaw1,
|
||||
ZonePlWaw2,
|
||||
}
|
||||
)
|
||||
|
||||
|
|
@ -108,7 +111,7 @@ func (region Region) GetZones() []Zone {
|
|||
case RegionNlAms:
|
||||
return []Zone{ZoneNlAms1, ZoneNlAms2}
|
||||
case RegionPlWaw:
|
||||
return []Zone{ZonePlWaw1}
|
||||
return []Zone{ZonePlWaw1, ZonePlWaw2}
|
||||
default:
|
||||
return []Zone{}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,6 +24,8 @@ type ScalewayRequest struct {
|
|||
ctx context.Context
|
||||
auth auth.Auth
|
||||
allPages bool
|
||||
zones []Zone
|
||||
regions []Region
|
||||
}
|
||||
|
||||
// getAllHeaders constructs a http.Header object and aggregates all headers into the object.
|
||||
|
|
@ -102,3 +104,19 @@ func (req *ScalewayRequest) validate() error {
|
|||
// nothing so far
|
||||
return nil
|
||||
}
|
||||
|
||||
func (req *ScalewayRequest) clone() *ScalewayRequest {
|
||||
clonedReq := &ScalewayRequest{
|
||||
Method: req.Method,
|
||||
Path: req.Path,
|
||||
Headers: req.Headers.Clone(),
|
||||
ctx: req.ctx,
|
||||
auth: req.auth,
|
||||
allPages: req.allPages,
|
||||
zones: req.zones,
|
||||
}
|
||||
if req.Query != nil {
|
||||
clonedReq.Query = url.Values(http.Header(req.Query).Clone())
|
||||
}
|
||||
return clonedReq
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,3 +30,21 @@ func WithAuthRequest(accessKey, secretKey string) RequestOption {
|
|||
s.auth = auth.NewToken(accessKey, secretKey)
|
||||
}
|
||||
}
|
||||
|
||||
// WithZones aggregates results from the requested zones in the response of a List request.
|
||||
// Response rows are sorted by zone, following the order of the given zones.
|
||||
// Will error when pagination is not supported on the request.
|
||||
func WithZones(zones ...Zone) RequestOption {
|
||||
return func(s *ScalewayRequest) {
|
||||
s.zones = append(s.zones, zones...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithRegions aggregates results from the requested regions in the response of a List request.
|
||||
// Response rows are sorted by region, following the order of the given regions.
|
||||
// Will error when pagination is not supported on the request.
|
||||
func WithRegions(regions ...Region) RequestOption {
|
||||
return func(s *ScalewayRequest) {
|
||||
s.regions = append(s.regions, regions...)
|
||||
}
|
||||
}
|
||||
|
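WithZones and WithRegions let one logical List call fan out to several localities; the client clones the request per locality, aggregates the lister responses, and sorts the rows using the order of the zones or regions given (see doListZones/doListRegions earlier in this diff). A hedged sketch with the instance API; the client is assumed to be authenticated and ListServers supports the lister interface needed for aggregation:

package example

import (
	"fmt"
	"log"

	"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// ListAcrossZones issues a single ListServers call that is fanned out to
// fr-par-1 and nl-ams-1; rows come back grouped in that zone order.
func ListAcrossZones(api *instance.API) {
	resp, err := api.ListServers(&instance.ListServersRequest{},
		scw.WithZones(scw.ZoneFrPar1, scw.ZoneNlAms1),
		scw.WithAllPages(),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.TotalCount, len(resp.Servers))
}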
|
|
|||
|
|
@ -1,26 +0,0 @@
|
|||
sudo: false
|
||||
language: go
|
||||
arch:
|
||||
- amd64
|
||||
- ppc64e
|
||||
|
||||
go:
|
||||
- "1.14"
|
||||
- "1.15"
|
||||
- "1.16"
|
||||
- tip
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
|
||||
script:
|
||||
- go build -v ./...
|
||||
- go test -count=1 -cover -race -v ./...
|
||||
- go vet ./...
|
||||
- FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi
|
||||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
A FileSystem Abstraction System for Go
|
||||
|
||||
[](https://travis-ci.org/spf13/afero) [](https://ci.appveyor.com/project/spf13/afero) [](https://godoc.org/github.com/spf13/afero) [](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
[](https://github.com/spf13/afero/actions/workflows/test.yml) [](https://godoc.org/github.com/spf13/afero) [](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
# Overview
|
||||
|
||||
|
|
|
|||
|
|
@ -103,8 +103,8 @@ type Fs interface {
|
|||
|
||||
var (
|
||||
ErrFileClosed = errors.New("File is closed")
|
||||
ErrOutOfRange = errors.New("Out of range")
|
||||
ErrTooLarge = errors.New("Too large")
|
||||
ErrOutOfRange = errors.New("out of range")
|
||||
ErrTooLarge = errors.New("too large")
|
||||
ErrFileNotFound = os.ErrNotExist
|
||||
ErrFileExists = os.ErrExist
|
||||
ErrDestinationExists = os.ErrExist
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
# This currently does nothing. We have moved to GitHub action, but this is kept
|
||||
# until spf13 has disabled this project in AppVeyor.
|
||||
version: '{build}'
|
||||
clone_folder: C:\gopath\src\github.com\spf13\afero
|
||||
environment:
|
||||
|
|
@ -6,10 +8,3 @@ build_script:
|
|||
- cmd: >-
|
||||
go version
|
||||
|
||||
go env
|
||||
|
||||
go get -v github.com/spf13/afero/...
|
||||
|
||||
go build -v github.com/spf13/afero/...
|
||||
test_script:
|
||||
- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/...
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
package afero
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
|
@ -8,7 +9,10 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
var _ Lstater = (*BasePathFs)(nil)
|
||||
var (
|
||||
_ Lstater = (*BasePathFs)(nil)
|
||||
_ fs.ReadDirFile = (*BasePathFile)(nil)
|
||||
)
|
||||
|
||||
// The BasePathFs restricts all operations to a given path within an Fs.
|
||||
// The given file name to the operations on this Fs will be prepended with
|
||||
|
|
@ -33,6 +37,14 @@ func (f *BasePathFile) Name() string {
|
|||
return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
|
||||
}
|
||||
|
||||
func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) {
|
||||
if rdf, ok := f.File.(fs.ReadDirFile); ok {
|
||||
return rdf.ReadDir(n)
|
||||
|
||||
}
|
||||
return readDirFile{f.File}.ReadDir(n)
|
||||
}
|
||||
|
||||
func NewBasePathFs(source Fs, path string) Fs {
|
||||
return &BasePathFs{source: source, path: path}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly
|
||||
// +build aix darwin openbsd freebsd netbsd dragonfly
|
||||
|
||||
package afero
|
||||
|
|
|
|||
|
|
@ -10,12 +10,8 @@
|
|||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// +build !darwin
|
||||
// +build !openbsd
|
||||
// +build !freebsd
|
||||
// +build !dragonfly
|
||||
// +build !netbsd
|
||||
// +build !aix
|
||||
//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix
|
||||
// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix
|
||||
|
||||
package afero
|
||||
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ type httpDir struct {
|
|||
}
|
||||
|
||||
func (d httpDir) Open(name string) (http.File, error) {
|
||||
if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
|
||||
if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) ||
|
||||
strings.Contains(name, "\x00") {
|
||||
return nil, errors.New("http: invalid character in file path")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,27 @@
|
|||
// Copyright © 2022 Steve Francia <spf@spf13.com>.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package common
|
||||
|
||||
import "io/fs"
|
||||
|
||||
// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry
|
||||
type FileInfoDirEntry struct {
|
||||
fs.FileInfo
|
||||
}
|
||||
|
||||
var _ fs.DirEntry = FileInfoDirEntry{}
|
||||
|
||||
func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() }
|
||||
|
||||
func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil }
|
||||
|
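FileInfoDirEntry adapts an os.FileInfo into an io/fs.DirEntry: the embedded FileInfo supplies Name and IsDir, and the two methods above fill in Type and Info. Since the type lives in afero's internal package, the sketch below copies the same pattern with the standard library only:

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// fileInfoDirEntry mirrors afero's internal common.FileInfoDirEntry adapter.
type fileInfoDirEntry struct{ fs.FileInfo }

func (d fileInfoDirEntry) Type() fs.FileMode          { return d.FileInfo.Mode().Type() }
func (d fileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil }

func main() {
	info, err := os.Stat(".")
	if err != nil {
		panic(err)
	}
	var entry fs.DirEntry = fileInfoDirEntry{info}
	fmt.Println(entry.Name(), entry.IsDir())
}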
|
@ -1,3 +1,4 @@
|
|||
//go:build go1.16
|
||||
// +build go1.16
|
||||
|
||||
package afero
|
||||
|
|
@ -7,7 +8,10 @@ import (
|
|||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/afero/internal/common"
|
||||
)
|
||||
|
||||
// IOFS adopts afero.Fs to stdlib io/fs.FS
|
||||
|
|
@ -66,14 +70,31 @@ func (iofs IOFS) Glob(pattern string) ([]string, error) {
|
|||
}
|
||||
|
||||
func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) {
|
||||
items, err := ReadDir(iofs.Fs, name)
|
||||
f, err := iofs.Fs.Open(name)
|
||||
if err != nil {
|
||||
return nil, iofs.wrapError("readdir", name, err)
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
|
||||
if rdf, ok := f.(fs.ReadDirFile); ok {
|
||||
items, err := rdf.ReadDir(-1)
|
||||
if err != nil {
|
||||
return nil, iofs.wrapError("readdir", name, err)
|
||||
}
|
||||
sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() })
|
||||
return items, nil
|
||||
}
|
||||
|
||||
items, err := f.Readdir(-1)
|
||||
if err != nil {
|
||||
return nil, iofs.wrapError("readdir", name, err)
|
||||
}
|
||||
sort.Sort(byName(items))
|
||||
|
||||
ret := make([]fs.DirEntry, len(items))
|
||||
for i := range items {
|
||||
ret[i] = dirEntry{items[i]}
|
||||
ret[i] = common.FileInfoDirEntry{FileInfo: items[i]}
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
|
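The reworked IOFS.ReadDir above first asks the opened file whether it implements fs.ReadDirFile (the in-memory and base-path files gain that method elsewhere in this diff) and only falls back to Readdir plus the FileInfoDirEntry adapter. A hedged usage sketch, assuming afero's NewIOFS constructor to wrap an afero.Fs as an io/fs.FS:

package main

import (
	"fmt"
	"io/fs"
	"log"

	"github.com/spf13/afero"
)

func main() {
	memFs := afero.NewMemMapFs()
	_ = afero.WriteFile(memFs, "a.txt", []byte("a"), 0o644)
	_ = afero.WriteFile(memFs, "b.txt", []byte("b"), 0o644)

	// fs.ReadDir goes through IOFS.ReadDir shown above and returns the
	// entries sorted by name.
	entries, err := fs.ReadDir(afero.NewIOFS(memFs), ".")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}
}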
|
@ -108,17 +129,6 @@ func (IOFS) wrapError(op, path string, err error) error {
|
|||
}
|
||||
}
|
||||
|
||||
// dirEntry provides adapter from os.FileInfo to fs.DirEntry
|
||||
type dirEntry struct {
|
||||
fs.FileInfo
|
||||
}
|
||||
|
||||
var _ fs.DirEntry = dirEntry{}
|
||||
|
||||
func (d dirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() }
|
||||
|
||||
func (d dirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil }
|
||||
|
||||
// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open
|
||||
type readDirFile struct {
|
||||
File
|
||||
|
|
@ -134,7 +144,7 @@ func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
|
|||
|
||||
ret := make([]fs.DirEntry, len(items))
|
||||
for i := range items {
|
||||
ret[i] = dirEntry{items[i]}
|
||||
ret[i] = common.FileInfoDirEntry{FileInfo: items[i]}
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
|
|
|
|||
|
|
@ -141,7 +141,7 @@ func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
|
|||
// We generate random temporary file names so that there's a good
|
||||
// chance the file doesn't exist yet - keeps the number of tries in
|
||||
// TempFile to a minimum.
|
||||
var rand uint32
|
||||
var randNum uint32
|
||||
var randmu sync.Mutex
|
||||
|
||||
func reseed() uint32 {
|
||||
|
|
@ -150,12 +150,12 @@ func reseed() uint32 {
|
|||
|
||||
func nextRandom() string {
|
||||
randmu.Lock()
|
||||
r := rand
|
||||
r := randNum
|
||||
if r == 0 {
|
||||
r = reseed()
|
||||
}
|
||||
r = r*1664525 + 1013904223 // constants from Numerical Recipes
|
||||
rand = r
|
||||
randNum = r
|
||||
randmu.Unlock()
|
||||
return strconv.Itoa(int(1e9 + r%1e9))[1:]
|
||||
}
|
||||
|
|
@@ -194,7 +194,7 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) {
 		if os.IsExist(err) {
 			if nconflict++; nconflict > 10 {
 				randmu.Lock()
-				rand = reseed()
+				randNum = reseed()
 				randmu.Unlock()
 			}
 			continue
@@ -226,7 +226,7 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) {
 		if os.IsExist(err) {
 			if nconflict++; nconflict > 10 {
 				randmu.Lock()
-				rand = reseed()
+				randNum = reseed()
 				randmu.Unlock()
 			}
 			continue
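The hunks above only rename the package-level `rand` variable to `randNum`, which avoids confusion with the `math/rand` package name. For context, a stand-alone copy of the temp-name generator those hunks touch; `reseed`'s body is not shown in the diff, so the seed below is an assumption:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"sync"
	"time"
)

var (
	randNum uint32
	randmu  sync.Mutex
)

// reseed's body is assumed here (the hunks above don't show it).
func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}

// nextRandom is a tiny linear congruential generator producing
// nine-decimal-digit suffixes for temporary file names.
func nextRandom() string {
	randmu.Lock()
	r := randNum
	if r == 0 {
		r = reseed()
	}
	r = r*1664525 + 1013904223 // constants from Numerical Recipes
	randNum = r
	randmu.Unlock()
	return strconv.Itoa(int(1e9 + r%1e9))[1:] // drop the leading '1'
}

func main() {
	fmt.Println(nextRandom(), nextRandom())
}
```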
@@ -18,15 +18,20 @@ import (
 	"bytes"
 	"errors"
 	"io"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"sync"
 	"sync/atomic"
 	"time"
+
+	"github.com/spf13/afero/internal/common"
 )
 
 const FilePathSeparator = string(filepath.Separator)
 
+var _ fs.ReadDirFile = &File{}
+
 type File struct {
 	// atomic requires 64-bit alignment for struct field access
 	at int64
@@ -183,10 +188,23 @@ func (f *File) Readdirnames(n int) (names []string, err error) {
 	return names, err
 }
 
+// Implements fs.ReadDirFile
+func (f *File) ReadDir(n int) ([]fs.DirEntry, error) {
+	fi, err := f.Readdir(n)
+	if err != nil {
+		return nil, err
+	}
+	di := make([]fs.DirEntry, len(fi))
+	for i, f := range fi {
+		di[i] = common.FileInfoDirEntry{FileInfo: f}
+	}
+	return di, nil
+}
+
 func (f *File) Read(b []byte) (n int, err error) {
 	f.fileData.Lock()
 	defer f.fileData.Unlock()
-	if f.closed == true {
+	if f.closed {
 		return 0, ErrFileClosed
 	}
 	if len(b) > 0 && int(f.at) == len(f.fileData.data) {
@@ -214,7 +232,7 @@ func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
 }
 
 func (f *File) Truncate(size int64) error {
-	if f.closed == true {
+	if f.closed {
 		return ErrFileClosed
 	}
 	if f.readOnly {
@@ -236,7 +254,7 @@ func (f *File) Truncate(size int64) error {
 }
 
 func (f *File) Seek(offset int64, whence int) (int64, error) {
-	if f.closed == true {
+	if f.closed {
 		return 0, ErrFileClosed
 	}
 	switch whence {
@@ -251,7 +269,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) {
 }
 
 func (f *File) Write(b []byte) (n int, err error) {
-	if f.closed == true {
+	if f.closed {
 		return 0, ErrFileClosed
 	}
 	if f.readOnly {
@@ -330,8 +348,8 @@ func (s *FileInfo) Size() int64 {
 
 var (
 	ErrFileClosed        = errors.New("File is closed")
-	ErrOutOfRange        = errors.New("Out of range")
-	ErrTooLarge          = errors.New("Too large")
+	ErrOutOfRange        = errors.New("out of range")
+	ErrTooLarge          = errors.New("too large")
 	ErrFileNotFound      = os.ErrNotExist
 	ErrFileExists        = os.ErrExist
 	ErrDestinationExists = os.ErrExist
@@ -65,7 +65,7 @@ func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
 	if f.Layer != nil {
 		n, err := f.Layer.ReadAt(s, o)
 		if (err == nil || err == io.EOF) && f.Base != nil {
-			_, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
+			_, err = f.Base.Seek(o+int64(n), io.SeekStart)
 		}
 		return n, err
 	}
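This hunk swaps the deprecated `os.SEEK_SET` constant for `io.SeekStart` without changing behavior. A small sketch of the modern constants, using `strings.Reader` as a stand-in for the union file's base layer:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("hello union file")

	// io.SeekStart / io.SeekCurrent / io.SeekEnd replace the deprecated
	// os.SEEK_SET / os.SEEK_CUR / os.SEEK_END constants.
	if _, err := r.Seek(6, io.SeekStart); err != nil {
		panic(err)
	}
	buf := make([]byte, 5)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // union
}
```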
@@ -25,6 +25,7 @@ import (
 	"strings"
 	"unicode"
 
+	"golang.org/x/text/runes"
 	"golang.org/x/text/transform"
 	"golang.org/x/text/unicode/norm"
 )
@@ -158,16 +159,12 @@ func UnicodeSanitize(s string) string {
 
 // Transform characters with accents into plain forms.
 func NeuterAccents(s string) string {
-	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+	t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
 	result, _, _ := transform.String(t, string(s))
 
 	return result
 }
 
-func isMn(r rune) bool {
-	return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
-}
-
 func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
 	return FileContainsBytes(a.Fs, filename, subslice)
 }
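The change replaces the deprecated `transform.RemoveFunc` helper (and the local `isMn` predicate) with `runes.Remove(runes.In(unicode.Mn))`. A runnable sketch of the same chain; the input string is only an illustration:

```go
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// Same chain as the updated NeuterAccents: decompose (NFD), drop
	// nonspacing marks (Unicode category Mn), recompose (NFC).
	t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)

	out, _, err := transform.String(t, "crème brûlée") // sample input
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // creme brulee
}
```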
@@ -299,6 +296,9 @@ func IsEmpty(fs Fs, path string) (bool, error) {
 		}
 		defer f.Close()
 		list, err := f.Readdir(-1)
+		if err != nil {
+			return false, err
+		}
 		return len(list) == 0, nil
 	}
 	return fi.Size() == 0, nil
@@ -11,7 +11,7 @@
 [](https://github.com/spf13/viper/actions?query=workflow%3ACI)
 [](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 [](https://goreportcard.com/report/github.com/spf13/viper)
-
+
 [](https://pkg.go.dev/mod/github.com/spf13/viper)
 
 **Go configuration with fangs!**
@@ -463,9 +463,8 @@ func (v *Viper) WatchConfig() {
 					// we only care about the config file with the following cases:
 					// 1 - if the config file was modified or created
 					// 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement)
-					const writeOrCreateMask = fsnotify.Write | fsnotify.Create
 					if (filepath.Clean(event.Name) == configFile &&
-						event.Op&writeOrCreateMask != 0) ||
+						(event.Has(fsnotify.Write) || event.Has(fsnotify.Create))) ||
 						(currentConfigFile != "" && currentConfigFile != realConfigFile) {
 						realConfigFile = currentConfigFile
 						err := v.ReadInConfig()
@@ -475,8 +474,7 @@ func (v *Viper) WatchConfig() {
 					if v.onConfigChange != nil {
 						v.onConfigChange(event)
 					}
-				} else if filepath.Clean(event.Name) == configFile &&
-					event.Op&fsnotify.Remove != 0 {
+				} else if filepath.Clean(event.Name) == configFile && event.Has(fsnotify.Remove) {
 					eventsWG.Done()
 					return
 				}
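Both `WatchConfig` hunks move from masking `event.Op` bits to the `Event.Has` helper introduced in fsnotify v1.6. A minimal sketch of that API outside viper; the watched path is a placeholder:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Placeholder path; viper watches the directory containing its config file.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for event := range w.Events {
		// Event.Has replaces the older event.Op&mask != 0 style checks.
		if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) {
			log.Println("written or created:", event.Name)
		}
		if event.Has(fsnotify.Remove) {
			log.Println("removed:", event.Name)
		}
	}
}
```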
@@ -1,5 +1,5 @@
-//go:build !js
-// +build !js
+//go:build darwin || dragonfly || freebsd || openbsd || linux || netbsd || solaris || windows
+// +build darwin dragonfly freebsd openbsd linux netbsd solaris windows
 
 package viper
 
@@ -1,13 +1,19 @@
-// +build js,wasm
+//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
+// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows
 
 package viper
 
 import (
-	"errors"
+	"fmt"
+	"runtime"
 
 	"github.com/fsnotify/fsnotify"
 )
 
+func newWatcher() (*watcher, error) {
+	return &watcher{}, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS)
+}
+
 type watcher struct {
 	Events chan fsnotify.Event
 	Errors chan error
@@ -24,7 +30,3 @@ func (*watcher) Add(name string) error {
 func (*watcher) Remove(name string) error {
 	return nil
 }
-
-func newWatcher() (*watcher, error) {
-	return &watcher{}, errors.New("fsnotify is not supported on WASM")
-}
@@ -43,14 +43,14 @@ func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion {
 		out = out.WithoutOptionalAttributesDeep()
 
 		if !isKnown {
-			return cty.UnknownVal(out), nil
+			return cty.UnknownVal(dynamicReplace(in.Type(), out)), nil
 		}
 
 		if isNull {
 			// We'll pass through nulls, albeit type converted, and let
 			// the caller deal with whatever handling they want to do in
 			// case null values are considered valid in some applications.
-			return cty.NullVal(out), nil
+			return cty.NullVal(dynamicReplace(in.Type(), out)), nil
 		}
 	}
 
|
|||
|
|
@ -190,7 +190,7 @@ func conversionTupleToSet(tupleType cty.Type, setEty cty.Type, unsafe bool) conv
|
|||
if len(tupleEtys) == 0 {
|
||||
// Empty tuple short-circuit
|
||||
return func(val cty.Value, path cty.Path) (cty.Value, error) {
|
||||
return cty.SetValEmpty(setEty), nil
|
||||
return cty.SetValEmpty(setEty.WithoutOptionalAttributesDeep()), nil
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -280,7 +280,7 @@ func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) co
 	if len(tupleEtys) == 0 {
 		// Empty tuple short-circuit
 		return func(val cty.Value, path cty.Path) (cty.Value, error) {
-			return cty.ListValEmpty(listEty), nil
+			return cty.ListValEmpty(listEty.WithoutOptionalAttributesDeep()), nil
 		}
 	}
 
@@ -372,7 +372,7 @@ func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) co
 	if len(objectAtys) == 0 {
 		// Empty object short-circuit
 		return func(val cty.Value, path cty.Path) (cty.Value, error) {
-			return cty.MapValEmpty(mapEty), nil
+			return cty.MapValEmpty(mapEty.WithoutOptionalAttributesDeep()), nil
 		}
 	}
 
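The three empty-collection short-circuits above now strip optional-attribute metadata from the element type, so it cannot leak into the converted value's type. A sketch of the kind of conversion these paths handle, assuming only go-cty's exported `convert` API; exact output formatting may differ by version:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Target element type declares "port" as an optional attribute.
	elem := cty.ObjectWithOptionalAttrs(map[string]cty.Type{
		"name": cty.String,
		"port": cty.Number,
	}, []string{"port"})

	// Converting an empty tuple takes the empty-tuple short-circuit; the
	// resulting set's element type carries no optional-attribute metadata.
	got, err := convert.Convert(cty.EmptyTupleVal, cty.Set(elem))
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Type().GoString())
}
```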
@@ -31,3 +31,107 @@ func dynamicFixup(wantType cty.Type) conversion {
 func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) {
 	return in, nil
 }
+
+// dynamicReplace aims to return the out type unchanged, but if it finds a
+// dynamic type either directly or in any descendent elements it replaces them
+// with the equivalent type from in.
+//
+// This function assumes that in and out are compatible from a Convert
+// perspective, and will panic if it finds that they are not. For example if
+// in is an object and out is a map, this function will still attempt to iterate
+// through both as if they were the same.
+func dynamicReplace(in, out cty.Type) cty.Type {
+	if in == cty.DynamicPseudoType || in == cty.NilType {
+		// Short circuit this case, there's no point worrying about this if in
+		// is a dynamic type or a nil type. Out is the best we can do.
+		return out
+	}
+
+	switch {
+	case out == cty.DynamicPseudoType:
+		// So replace out with in.
+		return in
+	case out.IsPrimitiveType(), out.IsCapsuleType():
+		// out is not dynamic and it doesn't contain descendent elements so just
+		// return it unchanged.
+		return out
+	case out.IsMapType():
+		var elemType cty.Type
+
+		// Maps are compatible with other maps or objects.
+		if in.IsMapType() {
+			elemType = dynamicReplace(in.ElementType(), out.ElementType())
+		}
+
+		if in.IsObjectType() {
+			var types []cty.Type
+			for _, t := range in.AttributeTypes() {
+				types = append(types, t)
+			}
+			unifiedType, _ := unify(types, true)
+			elemType = dynamicReplace(unifiedType, out.ElementType())
+		}
+
+		return cty.Map(elemType)
+	case out.IsObjectType():
+		// Objects are compatible with other objects and maps.
+		outTypes := map[string]cty.Type{}
+		if in.IsMapType() {
+			for attr, attrType := range out.AttributeTypes() {
+				outTypes[attr] = dynamicReplace(in.ElementType(), attrType)
+			}
+		}
+
+		if in.IsObjectType() {
+			for attr, attrType := range out.AttributeTypes() {
+				if !in.HasAttribute(attr) {
+					// If in does not have this attribute, then it is an
+					// optional attribute and there is nothing we can do except
+					// to return the type from out even if it is dynamic.
+					outTypes[attr] = attrType
+					continue
+				}
+				outTypes[attr] = dynamicReplace(in.AttributeType(attr), attrType)
+			}
+		}
+
+		return cty.Object(outTypes)
+	case out.IsSetType():
+		var elemType cty.Type
+
+		// Sets are compatible with other sets, lists, tuples.
+		if in.IsSetType() || in.IsListType() {
+			elemType = dynamicReplace(in.ElementType(), out.ElementType())
+		}
+
+		if in.IsTupleType() {
+			unifiedType, _ := unify(in.TupleElementTypes(), true)
+			elemType = dynamicReplace(unifiedType, out.ElementType())
+		}
+
+		return cty.Set(elemType)
+	case out.IsListType():
+		var elemType cty.Type
+
+		// Lists are compatible with other lists, sets, and tuples.
+		if in.IsSetType() || in.IsListType() {
+			elemType = dynamicReplace(in.ElementType(), out.ElementType())
+		}
+
+		if in.IsTupleType() {
+			unifiedType, _ := unify(in.TupleElementTypes(), true)
+			elemType = dynamicReplace(unifiedType, out.ElementType())
+		}
+
+		return cty.List(elemType)
+	case out.IsTupleType():
+		// Tuples are only compatible with other tuples
+		var types []cty.Type
+		for ix := 0; ix < len(out.TupleElementTypes()); ix++ {
+			types = append(types, dynamicReplace(in.TupleElementType(ix), out.TupleElementType(ix)))
+		}
+		return cty.Tuple(types)
+	default:
+		panic("unrecognized type " + out.FriendlyName())
+	}
+}
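Together with the `getConversion` change further up, the new `dynamicReplace` means unknown or null results converted toward a type that still contains `cty.DynamicPseudoType` keep the concrete element types recoverable from the input instead of staying fully dynamic. A sketch of the observable effect through the exported `convert` API; the printed type is what the traversal above would produce under these inputs, though exact results can vary with the unification rules:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// An unknown object value converted toward a map whose element type is
	// still cty.DynamicPseudoType: dynamicReplace fills the dynamic slot in
	// from the input's attribute types.
	in := cty.UnknownVal(cty.Object(map[string]cty.Type{
		"a": cty.String,
		"b": cty.String,
	}))

	out, err := convert.Convert(in, cty.Map(cty.DynamicPseudoType))
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Type().GoString()) // expected: cty.Map(cty.String)
}
```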
@@ -91,7 +91,7 @@ embedmd:
 
 .PHONY: install-tools
 install-tools:
-	go get -u golang.org/x/lint/golint
-	go get -u golang.org/x/tools/cmd/cover
-	go get -u golang.org/x/tools/cmd/goimports
-	go get -u github.com/rakyll/embedmd
+	go install golang.org/x/lint/golint@latest
+	go install golang.org/x/tools/cmd/cover@latest
+	go install golang.org/x/tools/cmd/goimports@latest
+	go install github.com/rakyll/embedmd@latest