diff --git a/go.mod b/go.mod index b234e2198f..42f64b4806 100644 --- a/go.mod +++ b/go.mod @@ -14,10 +14,10 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 github.com/apparentlymart/go-cidr v1.1.0 github.com/aws/amazon-ec2-instance-selector/v2 v2.4.1 - github.com/aws/aws-sdk-go v1.44.311 + github.com/aws/aws-sdk-go v1.44.322 github.com/blang/semver/v4 v4.0.0 github.com/cert-manager/cert-manager v1.12.3 - github.com/digitalocean/godo v1.100.0 + github.com/digitalocean/godo v1.101.0 github.com/go-ini/ini v1.67.0 github.com/go-logr/logr v1.2.4 github.com/gogo/protobuf v1.3.2 @@ -27,11 +27,11 @@ require ( github.com/google/go-tpm-tools v0.4.0 github.com/google/uuid v1.3.0 github.com/gophercloud/gophercloud v1.5.0 - github.com/hetznercloud/hcloud-go v1.48.0 + github.com/hetznercloud/hcloud-go v1.49.0 github.com/jacksontj/memberlistmesh v0.0.0-20190905163944-93462b9d2bb7 github.com/mitchellh/mapstructure v1.5.0 github.com/pelletier/go-toml v1.9.5 - github.com/pkg/sftp v1.13.5 + github.com/pkg/sftp v1.13.6 github.com/prometheus/client_golang v1.16.0 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 github.com/sergi/go-diff v1.3.1 @@ -42,19 +42,19 @@ require ( github.com/stretchr/testify v1.8.4 github.com/weaveworks/mesh v0.0.0-20191105120815-58dbcc3e8e63 go.uber.org/multierr v1.11.0 - golang.org/x/crypto v0.11.0 - golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 - golang.org/x/net v0.12.0 - golang.org/x/oauth2 v0.10.0 + golang.org/x/crypto v0.12.0 + golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb + golang.org/x/net v0.14.0 + golang.org/x/oauth2 v0.11.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.10.0 - google.golang.org/api v0.134.0 + golang.org/x/sys v0.11.0 + google.golang.org/api v0.136.0 google.golang.org/grpc v1.57.0 google.golang.org/protobuf v1.31.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/inf.v0 v0.9.1 gopkg.in/square/go-jose.v2 v2.6.0 - helm.sh/helm/v3 v3.12.2 + helm.sh/helm/v3 v3.12.3 k8s.io/api v0.27.4 k8s.io/apimachinery v0.27.4 k8s.io/cli-runtime v0.27.4 @@ -68,13 +68,13 @@ require ( k8s.io/kubelet v0.27.4 k8s.io/mount-utils v0.27.4 k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/controller-runtime v0.15.0 + sigs.k8s.io/controller-runtime v0.15.1 sigs.k8s.io/structured-merge-diff/v4 v4.3.0 sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go/compute v1.23.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -137,9 +137,11 @@ require ( github.com/gorilla/mux v1.8.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -196,7 +198,7 @@ require ( github.com/sahilm/fuzzy v0.1.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/shopspring/decimal v1.3.1 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.1 // indirect github.com/spf13/afero v1.9.5 
// indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect @@ -208,21 +210,21 @@ require ( go.opentelemetry.io/otel/trace v1.15.0 // indirect go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect golang.org/x/mod v0.11.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/term v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.9.1 // indirect gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.27.2 // indirect - k8s.io/cloud-provider v0.27.1 // indirect + k8s.io/apiextensions-apiserver v0.27.4 // indirect + k8s.io/cloud-provider v0.27.4 // indirect k8s.io/component-helpers v0.27.4 // indirect - k8s.io/csi-translation-lib v0.27.0 // indirect + k8s.io/csi-translation-lib v0.27.4 // indirect k8s.io/klog v1.0.0 // indirect k8s.io/kube-openapi v0.0.0-20230515203736-54b630e78af5 // indirect oras.land/oras-go v1.2.3 // indirect diff --git a/go.sum b/go.sum index d2795bb5c7..9644095996 100644 --- a/go.sum +++ b/go.sum @@ -33,8 +33,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= @@ -123,8 +123,8 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/amazon-ec2-instance-selector/v2 v2.4.1 h1:DmxtwV+pkakkVRhxKcAgnLbxCxvT7k8DBG271dfKPZ8= github.com/aws/amazon-ec2-instance-selector/v2 v2.4.1/go.mod h1:AEJrtkLkCkfIBIazidrVrgZqaXl+9dxI/wRgjdw+7G0= -github.com/aws/aws-sdk-go v1.44.311 h1:60i8hyVMOXqabKJQPCq4qKRBQ6hRafI/WOcDxGM+J7Q= -github.com/aws/aws-sdk-go v1.44.311/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.322 h1:7JfwifGRGQMHd99PvfXqxBaZsjuRaOF6e3X9zRx2uYo= +github.com/aws/aws-sdk-go v1.44.322/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 
@@ -184,8 +184,8 @@ github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitalocean/godo v1.100.0 h1:3MuDCh9Hw0MCBwV8GbHBiGrKZXCZ+VJfAY8iNCW+Mks= -github.com/digitalocean/godo v1.100.0/go.mod h1:SsS2oXo2rznfM/nORlZ/6JaUJZFhmKTib1YhopUc8NA= +github.com/digitalocean/godo v1.101.0 h1:gtnKn+TdW7e1y42q3ms86GXWWC/4kHttKJyTFZqdmEw= +github.com/digitalocean/godo v1.101.0/go.mod h1:SaUYccN7r+CO1QtsbXGypAsgobDrmSfVMJESEfXgoEg= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= @@ -227,6 +227,7 @@ github.com/evertras/bubble-table v0.14.4/go.mod h1:SPOZKbIpyYWPHBNki3fyNpiPBQkvk github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -399,6 +400,10 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -408,6 +413,8 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= 
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= @@ -422,8 +429,8 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hetznercloud/hcloud-go v1.48.0 h1:b6x0ABNYJr8zVX9sE1kCNMU/sndzI8vmZjxEWEr+Gn0= -github.com/hetznercloud/hcloud-go v1.48.0/go.mod h1:VzDWThl47lOnZXY0q5/LPFD+M62pfe/52TV+mOrpp9Q= +github.com/hetznercloud/hcloud-go v1.49.0 h1:b/dKaptbbahWrkzYilWi42UgyfpiQ0Qh9ZFvyAgAVoo= +github.com/hetznercloud/hcloud-go v1.49.0/go.mod h1:VzDWThl47lOnZXY0q5/LPFD+M62pfe/52TV+mOrpp9Q= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -478,6 +485,7 @@ github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3v github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -569,8 +577,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= -github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -622,8 +630,9 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.1 h1:Ou41VVR3nMWWmTiEUnj0OlsgOSCUFgsPAOl6jRIcVtQ= +github.com/sirupsen/logrus v1.9.1/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= @@ -718,10 +727,11 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -732,8 +742,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY= -golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb h1:mIKbk8weKhSeLH2GmUTrvx8CjkyJmnU1wFmg59CUjFA= +golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -812,8 +822,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -830,8 +840,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -907,7 +917,6 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -919,15 +928,15 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term 
v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -940,8 +949,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1043,8 +1052,8 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= -google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= -google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= +google.golang.org/api v0.136.0 h1:e/6enzUE1s4tGPa6Q3ZYShKTtvRc+1Jq0rrafhppmOs= +google.golang.org/api v0.136.0/go.mod h1:XtJfF+V2zgUxelOn5Zs3kECtluMxneJG8ZxUTlLNTPA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1113,10 +1122,10 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1192,8 +1201,8 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -helm.sh/helm/v3 v3.12.2 h1:kFyDBr/mgJUlyGzVTCieG4wW0zmo7fcNRWK0+FKkxqU= -helm.sh/helm/v3 v3.12.2/go.mod h1:v1PMayudIfZAvec3Wp4wAErensvK/rv5fu/xCiE6t3I= +helm.sh/helm/v3 v3.12.3 h1:5y1+Sbty12t48T/t/CGNYUIME5BJ0WKfmW/sobYqkFg= +helm.sh/helm/v3 v3.12.3/go.mod h1:KPKQiX9IP5HX7o5YnnhViMnNuKiL/lJBVQ47GHe1R0k= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1203,16 +1212,16 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.27.4 h1:0pCo/AN9hONazBKlNUdhQymmnfLRbSZjd5H5H3f0bSs= k8s.io/api v0.27.4/go.mod h1:O3smaaX15NfxjzILfiln1D8Z3+gEYpjEpiNA/1EVK1Y= -k8s.io/apiextensions-apiserver v0.27.2 h1:iwhyoeS4xj9Y7v8YExhUwbVuBhMr3Q4bd/laClBV6Bo= -k8s.io/apiextensions-apiserver v0.27.2/go.mod h1:Oz9UdvGguL3ULgRdY9QMUzL2RZImotgxvGjdWRq6ZXQ= +k8s.io/apiextensions-apiserver v0.27.4 h1:ie1yZG4nY/wvFMIR2hXBeSVq+HfNzib60FjnBYtPGSs= +k8s.io/apiextensions-apiserver v0.27.4/go.mod h1:KHZaDr5H9IbGEnSskEUp/DsdXe1hMQ7uzpQcYUFt2bM= k8s.io/apimachinery v0.27.4 h1:CdxflD4AF61yewuid0fLl6bM4a3q04jWel0IlP+aYjs= k8s.io/apimachinery v0.27.4/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= k8s.io/cli-runtime v0.27.4 h1:Zb0eci+58eHZNnoHhjRFc7W88s8dlG12VtIl3Nv2Hto= k8s.io/cli-runtime v0.27.4/go.mod h1:k9Z1xiZq2xNplQmehpDquLgc+rE+pubpO1cK4al4Mlw= k8s.io/client-go v0.27.4 h1:vj2YTtSJ6J4KxaC88P4pMPEQECWMY8gqPqsTgUKzvjk= k8s.io/client-go v0.27.4/go.mod h1:ragcly7lUlN0SRPk5/ZkGnDjPknzb37TICq07WhI6Xc= -k8s.io/cloud-provider v0.27.1 h1:482W9e2Yp8LDgTUKrXAxT+nH4pHS2TiBElI/CnfGWac= -k8s.io/cloud-provider v0.27.1/go.mod h1:oN7Zci2Ls2dorwSNd2fMiW/6DA40+F4o2QL70p63bqo= +k8s.io/cloud-provider v0.27.4 h1:FkZ1z40+YPm+nEqkojgPbjNQ3QLvU98gsFW3ZbZnrwo= +k8s.io/cloud-provider v0.27.4/go.mod h1:LpqG1hrNPQQySPWrMrNNNGl79dK0fk/yTkYUlRMoaWU= k8s.io/cloud-provider-aws v1.27.2 h1:TKyzew4hXLzd81dDG3mNfsm1J++dJ98Ay9680dWpaK0= k8s.io/cloud-provider-aws v1.27.2/go.mod h1:9vUb5mnVnReSRDBWcBxB1b0HOeEc472iOPmrnwpN9SA= k8s.io/cloud-provider-gcp/providers v0.27.1 h1:dHBfDCVqu+AhC8JRt1tGJdm7MGwxUwPNwauO6Jx3DSo= @@ -1221,8 +1230,8 @@ k8s.io/component-base v0.27.4 h1:Wqc0jMKEDGjKXdae8hBXeskRP//vu1m6ypC+gwErj4c= k8s.io/component-base v0.27.4/go.mod h1:hoiEETnLc0ioLv6WPeDt8vD34DDeB35MfQnxCARq3kY= k8s.io/component-helpers v0.27.4 h1:l1hn/Zx9mWXflo5xz1mo5RRW2g8b6rptWCG7My6rYoE= k8s.io/component-helpers v0.27.4/go.mod h1:ayW5btpTdJkVv+CcxhzNRfWT+oPrV6T6qZ1Ay6NEJNI= 
-k8s.io/csi-translation-lib v0.27.0 h1:1tv+MhNxJRFCmexqoSYXks4N/4zikOrgyLpYF63lXzo= -k8s.io/csi-translation-lib v0.27.0/go.mod h1:ZOPmKWI/2Ad2GRTIBXzOyX52NmtTcDTsh4GWqoHvHVA= +k8s.io/csi-translation-lib v0.27.4 h1:yk/0MNZAOyTEGk/OBNMwPTe63nZYlO/FWFv+J3z5pEM= +k8s.io/csi-translation-lib v0.27.4/go.mod h1:yDQc83ATsJshOCKhvRuPSoGVJOduWvou4u7YRON4U98= k8s.io/gengo v0.0.0-20230306165830-ab3349d207d4 h1:aClvVG6GbX10ISHcc24J+tqbr0S7fEe1MWkFJ7cWWCI= k8s.io/gengo v0.0.0-20230306165830-ab3349d207d4/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -1247,8 +1256,8 @@ oras.land/oras-go v1.2.3/go.mod h1:M/uaPdYklze0Vf3AakfarnpoEckvw0ESbRdN8Z1vdJg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.15.0 h1:ML+5Adt3qZnMSYxZ7gAverBLNPSMQEibtzAgp0UPojU= -sigs.k8s.io/controller-runtime v0.15.0/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= +sigs.k8s.io/controller-runtime v0.15.1 h1:9UvgKD4ZJGcj24vefUFgZFP3xej/3igL9BsOUTb/+4c= +sigs.k8s.io/controller-runtime v0.15.1/go.mod h1:7ngYvp1MLT+9GeZ+6lH3LOlcHkp/+tzA/fmHa4iq9kk= sigs.k8s.io/gateway-api v0.7.0 h1:/mG8yyJNBifqvuVLW5gwlI4CQs0NR/5q4BKUlf1bVdY= sigs.k8s.io/gateway-api v0.7.0/go.mod h1:Xv0+ZMxX0lu1nSSDIIPEfbVztgNZ+3cfiYrJsa2Ooso= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/tests/e2e/go.mod b/tests/e2e/go.mod index 4a57d68558..cb783198bc 100644 --- a/tests/e2e/go.mod +++ b/tests/e2e/go.mod @@ -23,10 +23,10 @@ require ( ) require ( - cloud.google.com/go v0.110.4 // indirect - cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go v0.110.6 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.0 // indirect + cloud.google.com/go/iam v1.1.1 // indirect cloud.google.com/go/storage v1.30.1 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-storage-blob-go v0.15.0 // indirect @@ -35,7 +35,7 @@ require ( github.com/StackExchange/wmi v1.2.1 // indirect github.com/acomagu/bufpipe v1.0.3 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect - github.com/aws/aws-sdk-go v1.44.311 // indirect + github.com/aws/aws-sdk-go v1.44.322 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -45,9 +45,9 @@ require ( github.com/containers/ocicrypt v1.1.3 // indirect github.com/containers/storage v1.38.3-0.20220301151551-d06b0f81c0aa // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/cli v23.0.5+incompatible // indirect + github.com/docker/cli v24.0.0+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v23.0.5+incompatible // indirect + github.com/docker/docker v24.0.0+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/emirpasic/gods v1.12.0 // indirect @@ -65,7 +65,7 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.6.9 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/go-containerregistry 
v0.15.2 // indirect + github.com/google/go-containerregistry v0.16.1 // indirect github.com/google/go-github/v33 v33.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea // indirect @@ -98,37 +98,37 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc3 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pkg/sftp v1.13.5 // indirect + github.com/pkg/sftp v1.13.6 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.43.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shirou/gopsutil/v3 v3.21.10 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/ulikunitz/xz v0.5.10 // indirect github.com/vbatts/tar-split v0.11.3 // indirect github.com/xanzy/ssh-agent v0.3.1 // indirect go.opencensus.io v0.24.0 // indirect go4.org v0.0.0-20201209231011-d4a079459e60 // indirect - golang.org/x/crypto v0.11.0 // indirect - golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb // indirect golang.org/x/mod v0.11.0 // indirect - golang.org/x/net v0.12.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.10.0 // indirect - golang.org/x/term v0.10.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/term v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.9.1 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.134.0 // indirect + google.golang.org/api v0.136.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 // indirect + google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect google.golang.org/grpc v1.57.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/tests/e2e/go.sum b/tests/e2e/go.sum index b9b21fb525..0929702b4f 100644 --- a/tests/e2e/go.sum +++ b/tests/e2e/go.sum @@ -33,23 +33,23 @@ cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= -cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go 
v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= -cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/logging v1.0.0/go.mod h1:V1cc3ogwobYzQq5f2R7DS/GvRIrI4FKj01Gs5glwAls= cloud.google.com/go/logging v1.1.2/go.mod h1:KrljuAHIw631j9+QXsnq9vDwsrwmdxfGpivMR68M7DY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -305,8 +305,8 @@ github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU github.com/aws/aws-sdk-go v1.31.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.37.22/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.311 h1:60i8hyVMOXqabKJQPCq4qKRBQ6hRafI/WOcDxGM+J7Q= -github.com/aws/aws-sdk-go v1.44.311/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.322 h1:7JfwifGRGQMHd99PvfXqxBaZsjuRaOF6e3X9zRx2uYo= +github.com/aws/aws-sdk-go v1.44.322/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= github.com/bazelbuild/buildtools v0.0.0-20200922170545-10384511ce98/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= @@ -568,8 +568,8 @@ github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20200210162036-a4bedce16568/go.mod 
h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE= -github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v24.0.0+incompatible h1:0+1VshNwBQzQAx9lOl+OYCTCEAD8fKs/qeXMx3O0wqM= +github.com/docker/cli v24.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -585,8 +585,8 @@ github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= -github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.0+incompatible h1:z4bf8HvONXX9Tde5lGBMQ7yCJgNahmJumdrStZAbeY4= +github.com/docker/docker v24.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= @@ -1018,8 +1018,8 @@ github.com/google/go-containerregistry v0.0.0-20200331213917-3d03ed9b1ca2/go.mod github.com/google/go-containerregistry v0.1.1/go.mod h1:npTSyywOeILcgWqd+rvtzGWflIPPcBQhYoOONaY4ltM= github.com/google/go-containerregistry v0.3.0/go.mod h1:BJ7VxR1hAhdiZBGGnvGETHEmFs1hzXc4VM1xjOPO9wA= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE= -github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q= +github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= +github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0= github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= @@ -1597,8 +1597,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= -github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= 
+github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1748,8 +1748,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.1 h1:Ou41VVR3nMWWmTiEUnj0OlsgOSCUFgsPAOl6jRIcVtQ= +github.com/sirupsen/logrus v1.9.1/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= @@ -2077,8 +2078,9 @@ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2094,8 +2096,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY= -golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb h1:mIKbk8weKhSeLH2GmUTrvx8CjkyJmnU1wFmg59CUjFA= +golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= golang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -2210,8 +2212,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2232,8 +2234,8 @@ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2388,15 +2390,15 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2410,8 +2412,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2593,8 +2595,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.134.0 h1:ktL4Goua+UBgoP1eL1/60LwZJqa1sIzkLmvoR3hR6Gw= -google.golang.org/api v0.134.0/go.mod h1:sjRL3UnjTx5UqNQS9EWr9N8p7xbHpy1k0XGRLCf3Spk= +google.golang.org/api v0.136.0 h1:e/6enzUE1s4tGPa6Q3ZYShKTtvRc+1Jq0rrafhppmOs= +google.golang.org/api v0.136.0/go.mod h1:XtJfF+V2zgUxelOn5Zs3kECtluMxneJG8ZxUTlLNTPA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2683,12 +2685,12 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/api 
v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 h1:Z8qdAF9GFsmcUuWQ5KVYIpP3PCKydn/YKORnghIalu4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index e939b9f5e0..6395537003 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.20.1" +const Version = "1.23.0" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index f8ab885015..e5da12fedd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -39,6 +39,7 @@ const ( EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). + IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv). MeCentral1RegionID = "me-central-1" // Middle East (UAE). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). 
@@ -117,7 +118,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$") return reg }(), }, @@ -213,6 +214,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "Europe (Paris)", }, + "il-central-1": region{ + Description: "Israel (Tel Aviv)", + }, "me-central-1": region{ Description: "Middle East (UAE)", }, @@ -356,6 +360,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -494,6 +501,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -1439,6 +1449,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "api.ecr.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -1907,6 +1925,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2247,6 +2268,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2390,6 +2414,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2475,6 +2502,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2617,6 +2647,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -3387,6 +3420,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -3466,6 +3502,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3493,6 +3538,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ap-southeast-3.api.aws", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3511,6 +3565,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.eu-central-1.api.aws", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -3529,6 
+3592,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.eu-south-1.api.aws", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3812,6 +3884,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4735,6 +4810,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4875,6 +4953,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5040,6 +5121,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5195,6 +5279,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5417,6 +5504,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5733,6 +5823,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -6851,6 +6944,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -7019,6 +7115,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -8116,6 +8218,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8208,6 +8313,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8359,6 +8467,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8578,6 +8689,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8587,18 +8701,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, 
endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8608,6 +8731,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8741,6 +8867,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8870,6 +8999,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "local", }: endpoint{ @@ -9075,6 +9207,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9252,6 +9387,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9421,6 +9559,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9599,6 +9740,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9717,6 +9861,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9886,6 +10033,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10292,6 +10442,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-me-central-1", }: endpoint{ @@ -10355,6 +10514,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10523,6 +10691,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10691,6 +10862,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10868,6 +11042,9 @@ var awsPartition = partition{ }, "emr-containers": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -10901,6 +11078,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -11217,6 +11397,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -11398,6 +11581,9 @@ var awsPartition = 
partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -11630,6 +11816,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -12817,6 +13006,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -12826,6 +13018,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12889,6 +13084,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -13526,6 +13724,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -13568,6 +13769,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -13577,6 +13781,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -13938,6 +14145,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -13978,6 +14191,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "internetmonitor.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -13998,21 +14216,45 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.us-east-1.api.aws", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{ Hostname: "internetmonitor.us-east-2.api.aws", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{ Hostname: "internetmonitor.us-west-1.api.aws", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{ Hostname: "internetmonitor.us-west-2.api.aws", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-2.api.aws", + }, }, }, "iot": service{ @@ -15427,6 +15669,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "kendra-ranking.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -15580,6 +15827,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: 
"il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -15689,6 +15939,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -16135,6 +16388,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + }, endpointKey{ Region: "il-central-1-fips", }: endpoint{ @@ -16142,6 +16404,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "il-central-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "me-central-1", @@ -16291,6 +16554,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -16300,6 +16566,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -16363,6 +16632,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -16628,6 +16900,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -16945,6 +17226,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17263,6 +17547,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -18405,6 +18692,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -18490,6 +18780,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -18953,6 +19246,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -19579,6 +19875,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20324,6 +20623,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -21263,6 +21565,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -21423,6 +21728,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: 
"me-central-1", }: endpoint{}, @@ -21547,6 +21855,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -21987,6 +22298,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22488,6 +22802,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.il-central-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -22613,6 +22932,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22873,6 +23195,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23110,6 +23435,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23481,6 +23809,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.il-central-1.amazonaws.com", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -24518,6 +24855,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -24699,6 +25039,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -24950,6 +25293,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25053,6 +25399,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -25062,6 +25411,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -25074,12 +25426,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -25384,6 +25742,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25897,75 +26264,6 @@ var awsPartition = partition{ }, "sms": service{ 
Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -25975,39 +26273,6 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -26511,6 +26776,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26662,6 +26930,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26825,6 +27096,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27424,6 +27698,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27701,6 +27978,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "local", }: endpoint{ @@ -27804,6 +28084,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -28011,6 +28294,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + 
endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -28156,6 +28442,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -28265,6 +28554,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -28406,6 +28698,25 @@ var awsPartition = partition{ }, }, }, + "tnb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "transcribe": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -29214,6 +29525,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -29816,6 +30130,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "il-central-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-me-central-1", @@ -29880,6 +30195,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "waf-regional.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -30530,6 +30862,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "il-central-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-me-central-1", @@ -30594,6 +30927,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "wafv2.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -31108,6 +31458,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -31384,6 +31737,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32761,9 +33124,6 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, }, }, "snowball": service{ @@ -36625,6 +36985,16 @@ var awsusgovPartition = partition{ }, }, }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -38240,15 +38610,6 @@ var awsusgovPartition = partition{ }, "sms": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -38258,15 +38619,6 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -39836,6 +40188,15 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, @@ -39845,6 +40206,15 @@ var awsisoPartition = partition{ }: endpoint{ Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "rds": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index bd1c349966..b9787264c6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.311" +const SDKVersion = "1.44.322" diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index bfb77cd326..b4d7de3c3d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -7038,6 +7038,39 @@ func (s *Alarm) SetAlarmName(v string) *Alarm { return s } +// Specifies the CloudWatch alarm specification to use in an instance refresh. +type AlarmSpecification struct { + _ struct{} `type:"structure"` + + // The names of one or more CloudWatch alarms to monitor for the instance refresh. + // You can specify up to 10 alarms. + Alarms []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AlarmSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AlarmSpecification) GoString() string { + return s.String() +} + +// SetAlarms sets the Alarms field's value. 
+func (s *AlarmSpecification) SetAlarms(v []*string) *AlarmSpecification { + s.Alarms = v + return s +} + type AttachInstancesInput struct { _ struct{} `type:"structure"` @@ -18744,8 +18777,13 @@ func (s RecordLifecycleActionHeartbeatOutput) GoString() string { type RefreshPreferences struct { _ struct{} `type:"structure"` + // (Optional) The CloudWatch alarm specification. CloudWatch alarms can be used + // to identify any issues and fail the operation if an alarm threshold is met. + AlarmSpecification *AlarmSpecification `type:"structure"` + // (Optional) Indicates whether to roll back the Auto Scaling group to its previous - // configuration if the instance refresh fails. The default is false. + // configuration if the instance refresh fails or a CloudWatch alarm threshold + // is met. The default is false. // // A rollback is not supported in the following situations: // @@ -18757,6 +18795,9 @@ type RefreshPreferences struct { // // * The Auto Scaling group uses the launch template's $Latest or $Default // version. + // + // For more information, see Undo changes with a rollback (https://docs.aws.amazon.com/autoscaling/ec2/userguide/instance-refresh-rollback.html) + // in the Amazon EC2 Auto Scaling User Guide. AutoRollback *bool `type:"boolean"` // (Optional) The amount of time, in seconds, to wait after a checkpoint before @@ -18869,6 +18910,12 @@ func (s RefreshPreferences) GoString() string { return s.String() } +// SetAlarmSpecification sets the AlarmSpecification field's value. +func (s *RefreshPreferences) SetAlarmSpecification(v *AlarmSpecification) *RefreshPreferences { + s.AlarmSpecification = v + return s +} + // SetAutoRollback sets the AutoRollback field's value. func (s *RefreshPreferences) SetAutoRollback(v bool) *RefreshPreferences { s.AutoRollback = &v @@ -19961,6 +20008,8 @@ type StartInstanceRefreshInput struct { // // * Checkpoints // + // * CloudWatch alarms + // // * Skip matching Preferences *RefreshPreferences `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 062b4df3ff..c1ddd39b45 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -72923,6 +72923,21 @@ type CreateNetworkInterfaceInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // If you’re creating a network interface in a dual-stack or IPv6-only subnet, + // you have the option to assign a primary IPv6 IP address. A primary IPv6 address + // is an IPv6 GUA address associated with an ENI that you have enabled to use + // a primary IPv6 address. Use this option if the instance that this ENI will + // be attached to relies on its IPv6 address not changing. Amazon Web Services + // will automatically assign an IPv6 address associated with the ENI attached + // to your instance to be the primary IPv6 address. Once you enable an IPv6 + // GUA address to be a primary IPv6, you cannot disable it. When you enable + // an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made + // the primary IPv6 address until the instance is terminated or the network + // interface is detached. If you have multiple IPv6 addresses associated with + // an ENI attached to your instance and you enable a primary IPv6 address, the + // first IPv6 GUA address associated with the ENI becomes the primary IPv6 address. + EnablePrimaryIpv6 *bool `type:"boolean"` + // The IDs of one or more security groups. 
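// Hedged usage sketch (not part of the vendored diff): the Auto Scaling changes above add
// AlarmSpecification to RefreshPreferences, so an instance refresh can watch CloudWatch
// alarms and roll back when one fires. The group and alarm names below are assumptions.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := autoscaling.New(sess)

	out, err := svc.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
		AutoScalingGroupName: aws.String("example-asg"), // assumed group name
		Preferences: &autoscaling.RefreshPreferences{
			AutoRollback: aws.Bool(true),
			AlarmSpecification: &autoscaling.AlarmSpecification{
				// Up to 10 alarms may be monitored; this name is illustrative only.
				Alarms: aws.StringSlice([]string{"example-high-5xx-alarm"}),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("instance refresh started:", aws.StringValue(out.InstanceRefreshId))
}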
Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` @@ -73060,6 +73075,12 @@ func (s *CreateNetworkInterfaceInput) SetDryRun(v bool) *CreateNetworkInterfaceI return s } +// SetEnablePrimaryIpv6 sets the EnablePrimaryIpv6 field's value. +func (s *CreateNetworkInterfaceInput) SetEnablePrimaryIpv6(v bool) *CreateNetworkInterfaceInput { + s.EnablePrimaryIpv6 = &v + return s +} + // SetGroups sets the Groups field's value. func (s *CreateNetworkInterfaceInput) SetGroups(v []*string) *CreateNetworkInterfaceInput { s.Groups = v @@ -112880,8 +112901,7 @@ type EbsBlockDevice struct { // The ARN of the Outpost on which the snapshot is stored. // - // This parameter is only supported on BlockDeviceMapping objects called by - // CreateImage (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateImage.html). + // This parameter is not supported when using CreateImage (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateImage.html). OutpostArn *string `locationName:"outpostArn" type:"string"` // The ID of the snapshot. @@ -130058,6 +130078,13 @@ type InstanceIpv6Address struct { // The IPv6 address. Ipv6Address *string `locationName:"ipv6Address" type:"string"` + + // Determines if an IPv6 address associated with a network interface is the + // primary IPv6 address. When you enable an IPv6 GUA address to be a primary + // IPv6, the first IPv6 GUA will be made the primary IPv6 address until the + // instance is terminated or the network interface is detached. For more information, + // see RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html). + IsPrimaryIpv6 *bool `locationName:"isPrimaryIpv6" type:"boolean"` } // String returns the string representation. @@ -130084,6 +130111,12 @@ func (s *InstanceIpv6Address) SetIpv6Address(v string) *InstanceIpv6Address { return s } +// SetIsPrimaryIpv6 sets the IsPrimaryIpv6 field's value. +func (s *InstanceIpv6Address) SetIsPrimaryIpv6(v bool) *InstanceIpv6Address { + s.IsPrimaryIpv6 = &v + return s +} + // Describes an IPv6 address. type InstanceIpv6AddressRequest struct { _ struct{} `type:"structure"` @@ -130922,6 +130955,13 @@ type InstanceNetworkInterfaceSpecification struct { // specify a network interface ID in a launch specification. NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + // The primary IPv6 address of the network interface. When you enable an IPv6 + // GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary + // IPv6 address until the instance is terminated or the network interface is + // detached. For more information about primary IPv6 addresses, see RunInstances + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html). + PrimaryIpv6 *bool `type:"boolean"` + // The private IPv4 address of the network interface. Applies only if creating // a network interface when launching an instance. You cannot specify this option // if you're launching more than one instance in a RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) @@ -131054,6 +131094,12 @@ func (s *InstanceNetworkInterfaceSpecification) SetNetworkInterfaceId(v string) return s } +// SetPrimaryIpv6 sets the PrimaryIpv6 field's value. +func (s *InstanceNetworkInterfaceSpecification) SetPrimaryIpv6(v bool) *InstanceNetworkInterfaceSpecification { + s.PrimaryIpv6 = &v + return s +} + // SetPrivateIpAddress sets the PrivateIpAddress field's value. 
func (s *InstanceNetworkInterfaceSpecification) SetPrivateIpAddress(v string) *InstanceNetworkInterfaceSpecification { s.PrivateIpAddress = &v @@ -137434,6 +137480,13 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct { // The ID of the network interface. NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` + // The primary IPv6 address of the network interface. When you enable an IPv6 + // GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary + // IPv6 address until the instance is terminated or the network interface is + // detached. For more information about primary IPv6 addresses, see RunInstances + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html). + PrimaryIpv6 *bool `locationName:"primaryIpv6" type:"boolean"` + // The primary private IPv4 address of the network interface. PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` @@ -137555,6 +137608,12 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetNetworkInterfac return s } +// SetPrimaryIpv6 sets the PrimaryIpv6 field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetPrimaryIpv6(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.PrimaryIpv6 = &v + return s +} + // SetPrivateIpAddress sets the PrivateIpAddress field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetPrivateIpAddress(v string) *LaunchTemplateInstanceNetworkInterfaceSpecification { s.PrivateIpAddress = &v @@ -137648,6 +137707,13 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // The ID of the network interface. NetworkInterfaceId *string `type:"string"` + // The primary IPv6 address of the network interface. When you enable an IPv6 + // GUA address to be a primary IPv6, the first IPv6 GUA will be made the primary + // IPv6 address until the instance is terminated or the network interface is + // detached. For more information about primary IPv6 addresses, see RunInstances + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html). + PrimaryIpv6 *bool `type:"boolean"` + // The primary private IPv4 address of the network interface. PrivateIpAddress *string `type:"string"` @@ -137769,6 +137835,12 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetNetworkI return s } +// SetPrimaryIpv6 sets the PrimaryIpv6 field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetPrimaryIpv6(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.PrimaryIpv6 = &v + return s +} + // SetPrivateIpAddress sets the PrivateIpAddress field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetPrivateIpAddress(v string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { s.PrivateIpAddress = &v @@ -144423,6 +144495,21 @@ type ModifyNetworkInterfaceAttributeInput struct { // attached to the instance. EnaSrdSpecification *EnaSrdSpecification `type:"structure"` + // If you’re modifying a network interface in a dual-stack or IPv6-only subnet, + // you have the option to assign a primary IPv6 IP address. A primary IPv6 address + // is an IPv6 GUA address associated with an ENI that you have enabled to use + // a primary IPv6 address. Use this option if the instance that this ENI will + // be attached to relies on its IPv6 address not changing. 
Amazon Web Services + // will automatically assign an IPv6 address associated with the ENI attached + // to your instance to be the primary IPv6 address. Once you enable an IPv6 + // GUA address to be a primary IPv6, you cannot disable it. When you enable + // an IPv6 GUA address to be a primary IPv6, the first IPv6 GUA will be made + // the primary IPv6 address until the instance is terminated or the network + // interface is detached. If you have multiple IPv6 addresses associated with + // an ENI attached to your instance and you enable a primary IPv6 address, the + // first IPv6 GUA address associated with the ENI becomes the primary IPv6 address. + EnablePrimaryIpv6 *bool `type:"boolean"` + // Changes the security groups for the network interface. The new set of groups // you specify replaces the current set. You must specify at least one group, // even if it's just the default security group in the VPC. You must specify @@ -144498,6 +144585,12 @@ func (s *ModifyNetworkInterfaceAttributeInput) SetEnaSrdSpecification(v *EnaSrdS return s } +// SetEnablePrimaryIpv6 sets the EnablePrimaryIpv6 field's value. +func (s *ModifyNetworkInterfaceAttributeInput) SetEnablePrimaryIpv6(v bool) *ModifyNetworkInterfaceAttributeInput { + s.EnablePrimaryIpv6 = &v + return s +} + // SetGroups sets the Groups field's value. func (s *ModifyNetworkInterfaceAttributeInput) SetGroups(v []*string) *ModifyNetworkInterfaceAttributeInput { s.Groups = v @@ -151911,6 +152004,13 @@ type NetworkInterfaceIpv6Address struct { // The IPv6 address. Ipv6Address *string `locationName:"ipv6Address" type:"string"` + + // Determines if an IPv6 address associated with a network interface is the + // primary IPv6 address. When you enable an IPv6 GUA address to be a primary + // IPv6, the first IPv6 GUA will be made the primary IPv6 address until the + // instance is terminated or the network interface is detached. For more information, + // see ModifyNetworkInterfaceAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyNetworkInterfaceAttribute.html). + IsPrimaryIpv6 *bool `locationName:"isPrimaryIpv6" type:"boolean"` } // String returns the string representation. @@ -151937,6 +152037,12 @@ func (s *NetworkInterfaceIpv6Address) SetIpv6Address(v string) *NetworkInterface return s } +// SetIsPrimaryIpv6 sets the IsPrimaryIpv6 field's value. +func (s *NetworkInterfaceIpv6Address) SetIsPrimaryIpv6(v bool) *NetworkInterfaceIpv6Address { + s.IsPrimaryIpv6 = &v + return s +} + // Describes a permission for a network interface. type NetworkInterfacePermission struct { _ struct{} `type:"structure"` @@ -164029,6 +164135,21 @@ type RunInstancesInput struct { // are considered current customers and will be able to continue using the service. ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` + // If you’re launching an instance into a dual-stack or IPv6-only subnet, + // you can enable assigning a primary IPv6 address. A primary IPv6 address is + // an IPv6 GUA address associated with an ENI that you have enabled to use a + // primary IPv6 address. Use this option if an instance relies on its IPv6 address + // not changing. When you launch the instance, Amazon Web Services will automatically + // assign an IPv6 address associated with the ENI attached to your instance + // to be the primary IPv6 address. Once you enable an IPv6 GUA address to be + // a primary IPv6, you cannot disable it. 
When you enable an IPv6 GUA address + // to be a primary IPv6, the first IPv6 GUA will be made the primary IPv6 address + // until the instance is terminated or the network interface is detached. If + // you have multiple IPv6 addresses associated with an ENI attached to your + // instance and you enable a primary IPv6 address, the first IPv6 GUA address + // associated with the ENI becomes the primary IPv6 address. + EnablePrimaryIpv6 *bool `type:"boolean"` + // Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. // For more information, see What is Amazon Web Services Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) // in the Amazon Web Services Nitro Enclaves User Guide. @@ -164366,6 +164487,12 @@ func (s *RunInstancesInput) SetElasticInferenceAccelerators(v []*ElasticInferenc return s } +// SetEnablePrimaryIpv6 sets the EnablePrimaryIpv6 field's value. +func (s *RunInstancesInput) SetEnablePrimaryIpv6(v bool) *RunInstancesInput { + s.EnablePrimaryIpv6 = &v + return s +} + // SetEnclaveOptions sets the EnclaveOptions field's value. func (s *RunInstancesInput) SetEnclaveOptions(v *EnclaveOptionsRequest) *RunInstancesInput { s.EnclaveOptions = v @@ -186877,6 +187004,51 @@ const ( // InstanceTypeC7gn16xlarge is a InstanceType enum value InstanceTypeC7gn16xlarge = "c7gn.16xlarge" + + // InstanceTypeP548xlarge is a InstanceType enum value + InstanceTypeP548xlarge = "p5.48xlarge" + + // InstanceTypeM7iLarge is a InstanceType enum value + InstanceTypeM7iLarge = "m7i.large" + + // InstanceTypeM7iXlarge is a InstanceType enum value + InstanceTypeM7iXlarge = "m7i.xlarge" + + // InstanceTypeM7i2xlarge is a InstanceType enum value + InstanceTypeM7i2xlarge = "m7i.2xlarge" + + // InstanceTypeM7i4xlarge is a InstanceType enum value + InstanceTypeM7i4xlarge = "m7i.4xlarge" + + // InstanceTypeM7i8xlarge is a InstanceType enum value + InstanceTypeM7i8xlarge = "m7i.8xlarge" + + // InstanceTypeM7i12xlarge is a InstanceType enum value + InstanceTypeM7i12xlarge = "m7i.12xlarge" + + // InstanceTypeM7i16xlarge is a InstanceType enum value + InstanceTypeM7i16xlarge = "m7i.16xlarge" + + // InstanceTypeM7i24xlarge is a InstanceType enum value + InstanceTypeM7i24xlarge = "m7i.24xlarge" + + // InstanceTypeM7i48xlarge is a InstanceType enum value + InstanceTypeM7i48xlarge = "m7i.48xlarge" + + // InstanceTypeM7iFlexLarge is a InstanceType enum value + InstanceTypeM7iFlexLarge = "m7i-flex.large" + + // InstanceTypeM7iFlexXlarge is a InstanceType enum value + InstanceTypeM7iFlexXlarge = "m7i-flex.xlarge" + + // InstanceTypeM7iFlex2xlarge is a InstanceType enum value + InstanceTypeM7iFlex2xlarge = "m7i-flex.2xlarge" + + // InstanceTypeM7iFlex4xlarge is a InstanceType enum value + InstanceTypeM7iFlex4xlarge = "m7i-flex.4xlarge" + + // InstanceTypeM7iFlex8xlarge is a InstanceType enum value + InstanceTypeM7iFlex8xlarge = "m7i-flex.8xlarge" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -187547,6 +187719,21 @@ func InstanceType_Values() []string { InstanceTypeC7gn8xlarge, InstanceTypeC7gn12xlarge, InstanceTypeC7gn16xlarge, + InstanceTypeP548xlarge, + InstanceTypeM7iLarge, + InstanceTypeM7iXlarge, + InstanceTypeM7i2xlarge, + InstanceTypeM7i4xlarge, + InstanceTypeM7i8xlarge, + InstanceTypeM7i12xlarge, + InstanceTypeM7i16xlarge, + InstanceTypeM7i24xlarge, + InstanceTypeM7i48xlarge, + InstanceTypeM7iFlexLarge, + InstanceTypeM7iFlexXlarge, + InstanceTypeM7iFlex2xlarge, + InstanceTypeM7iFlex4xlarge, + InstanceTypeM7iFlex8xlarge, } } 
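// Hedged usage sketch (not part of the vendored diff): the EC2 changes above add the
// EnablePrimaryIpv6 flag, which asks EC2 to pin the first IPv6 GUA on the primary ENI as the
// instance's primary IPv6 address. The AMI and subnet IDs below are assumptions; m7i.large is
// one of the instance type enum values added in this update.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2.New(sess)

	res, err := svc.RunInstances(&ec2.RunInstancesInput{
		ImageId:           aws.String("ami-0123456789abcdef0"),    // assumed AMI
		SubnetId:          aws.String("subnet-0123456789abcdef0"), // assumed dual-stack or IPv6-only subnet
		InstanceType:      aws.String(ec2.InstanceTypeM7iLarge),
		MinCount:          aws.Int64(1),
		MaxCount:          aws.Int64(1),
		EnablePrimaryIpv6: aws.Bool(true), // new field from this update
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("launched:", aws.StringValue(res.Instances[0].InstanceId))
}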
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go index 643b4fb74c..82e93a9650 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go @@ -1113,6 +1113,8 @@ func (c *ELBV2) DeregisterTargetsRequest(input *DeregisterTargetsInput) (req *re // the targets are deregistered, they no longer receive traffic from the load // balancer. // +// Note: If the specified target does not exist, the action returns successfully. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3274,11 +3276,13 @@ func (c *ELBV2) SetSecurityGroupsRequest(input *SetSecurityGroupsInput) (req *re // SetSecurityGroups API operation for Elastic Load Balancing. // // Associates the specified security groups with the specified Application Load -// Balancer. The specified security groups override the previously associated -// security groups. +// Balancer or Network Load Balancer. The specified security groups override +// the previously associated security groups. // -// You can't specify a security group for a Network Load Balancer or Gateway -// Load Balancer. +// You can't perform this operation on a Network Load Balancer unless you specified +// a security group for the load balancer when you created it. +// +// You can't associate a security group with a Gateway Load Balancer. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4458,8 +4462,8 @@ type CreateLoadBalancerInput struct { // You cannot specify a scheme for a Gateway Load Balancer. Scheme *string `type:"string" enum:"LoadBalancerSchemeEnum"` - // [Application Load Balancers] The IDs of the security groups for the load - // balancer. + // [Application Load Balancers and Network Load Balancers] The IDs of the security + // groups for the load balancer. SecurityGroups []*string `type:"list"` // The IDs of the public subnets. You can specify only one subnet per Availability @@ -6836,6 +6840,14 @@ func (s *HttpRequestMethodConditionConfig) SetValues(v []*string) *HttpRequestMe // Information about an Elastic Load Balancing resource limit for your Amazon // Web Services account. +// +// For more information, see the following: +// +// - Quotas for your Application Load Balancers (https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html) +// +// - Quotas for your Network Load Balancers (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/load-balancer-limits.html) +// +// - Quotas for your Gateway Load Balancers (https://docs.aws.amazon.com/elasticloadbalancing/latest/gateway/quotas-limits.html) type Limit struct { _ struct{} `type:"structure"` @@ -7029,6 +7041,10 @@ type LoadBalancer struct { // The public DNS name of the load balancer. DNSName *string `type:"string"` + // Indicates whether to evaluate inbound security group rules for traffic sent + // to a Network Load Balancer through Amazon Web Services PrivateLink. + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic *string `type:"string"` + // The type of IP addresses used by the subnets for your load balancer. The // possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and // IPv6 addresses). 
@@ -7112,6 +7128,12 @@ func (s *LoadBalancer) SetDNSName(v string) *LoadBalancer { return s } +// SetEnforceSecurityGroupInboundRulesOnPrivateLinkTraffic sets the EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic field's value. +func (s *LoadBalancer) SetEnforceSecurityGroupInboundRulesOnPrivateLinkTraffic(v string) *LoadBalancer { + s.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = &v + return s +} + // SetIpAddressType sets the IpAddressType field's value. func (s *LoadBalancer) SetIpAddressType(v string) *LoadBalancer { s.IpAddressType = &v @@ -8683,6 +8705,8 @@ func (s *Rule) SetRuleArn(v string) *Rule { // can also optionally include one or more of each of the following conditions: // http-header and query-string. Note that the value for a condition cannot // be empty. +// +// For more information, see Quotas for your Application Load Balancers (https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html). type RuleCondition struct { _ struct{} `type:"structure"` @@ -9056,6 +9080,11 @@ func (s *SetRulePrioritiesOutput) SetRules(v []*Rule) *SetRulePrioritiesOutput { type SetSecurityGroupsInput struct { _ struct{} `type:"structure"` + // Indicates whether to evaluate inbound security group rules for traffic sent + // to a Network Load Balancer through Amazon Web Services PrivateLink. The default + // is on. + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic *string `type:"string" enum:"EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum"` + // The Amazon Resource Name (ARN) of the load balancer. // // LoadBalancerArn is a required field @@ -9101,6 +9130,12 @@ func (s *SetSecurityGroupsInput) Validate() error { return nil } +// SetEnforceSecurityGroupInboundRulesOnPrivateLinkTraffic sets the EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic field's value. +func (s *SetSecurityGroupsInput) SetEnforceSecurityGroupInboundRulesOnPrivateLinkTraffic(v string) *SetSecurityGroupsInput { + s.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = &v + return s +} + // SetLoadBalancerArn sets the LoadBalancerArn field's value. func (s *SetSecurityGroupsInput) SetLoadBalancerArn(v string) *SetSecurityGroupsInput { s.LoadBalancerArn = &v @@ -9116,6 +9151,10 @@ func (s *SetSecurityGroupsInput) SetSecurityGroups(v []*string) *SetSecurityGrou type SetSecurityGroupsOutput struct { _ struct{} `type:"structure"` + // Indicates whether to evaluate inbound security group rules for traffic sent + // to a Network Load Balancer through Amazon Web Services PrivateLink. + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic *string `type:"string" enum:"EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum"` + // The IDs of the security groups associated with the load balancer. SecurityGroupIds []*string `type:"list"` } @@ -9138,6 +9177,12 @@ func (s SetSecurityGroupsOutput) GoString() string { return s.String() } +// SetEnforceSecurityGroupInboundRulesOnPrivateLinkTraffic sets the EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic field's value. +func (s *SetSecurityGroupsOutput) SetEnforceSecurityGroupInboundRulesOnPrivateLinkTraffic(v string) *SetSecurityGroupsOutput { + s.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = &v + return s +} + // SetSecurityGroupIds sets the SecurityGroupIds field's value. 
func (s *SetSecurityGroupsOutput) SetSecurityGroupIds(v []*string) *SetSecurityGroupsOutput { s.SecurityGroupIds = v @@ -9150,7 +9195,7 @@ type SetSubnetsInput struct { // [Network Load Balancers] The type of IP addresses used by the subnets for // your load balancer. The possible values are ipv4 (for IPv4 addresses) and // dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for - // a load balancer with a UDP or TCP_UDP listener. . + // a load balancer with a UDP or TCP_UDP listener. IpAddressType *string `type:"string" enum:"IpAddressType"` // The Amazon Resource Name (ARN) of the load balancer. @@ -9590,7 +9635,8 @@ type TargetDescription struct { // The port on which the target is listening. If the target group protocol is // GENEVE, the supported port is 6081. If the target type is alb, the targeted // Application Load Balancer must have at least one listener whose port matches - // the target group port. Not used if the target is a Lambda function. + // the target group port. This parameter is not used if the target is a Lambda + // function. Port *int64 `min:"1" type:"integer"` } @@ -9680,16 +9726,16 @@ type TargetGroup struct { // type defaults to ipv4. IpAddressType *string `type:"string" enum:"TargetGroupIpAddressTypeEnum"` - // The Amazon Resource Names (ARN) of the load balancers that route traffic - // to this target group. + // The Amazon Resource Name (ARN) of the load balancer that routes traffic to + // this target group. You can use each target group with only one load balancer. LoadBalancerArns []*string `type:"list"` // The HTTP or gRPC codes to use when checking for a successful response from // a target. Matcher *Matcher `type:"structure"` - // The port on which the targets are listening. Not used if the target is a - // Lambda function. + // The port on which the targets are listening. This parameter is not used if + // the target is a Lambda function. Port *int64 `min:"1" type:"integer"` // The protocol to use for routing traffic to the targets. 
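// Hedged usage sketch (not part of the vendored diff): the elbv2 changes above extend
// SetSecurityGroups to Network Load Balancers and add the PrivateLink inbound-rule
// enforcement setting. The load balancer ARN and security group ID below are assumptions.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elbv2.New(sess)

	out, err := svc.SetSecurityGroups(&elbv2.SetSecurityGroupsInput{
		// Assumed NLB that was created with a security group attached.
		LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:111122223333:loadbalancer/net/example-nlb/0123456789abcdef"),
		SecurityGroups:  aws.StringSlice([]string{"sg-0123456789abcdef0"}),
		// New field from this update; the enum constants are defined further down in the diff.
		EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic: aws.String("off"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("associated security groups:", aws.StringValueSlice(out.SecurityGroupIds))
}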
@@ -10314,6 +10360,22 @@ func AuthenticateOidcActionConditionalBehaviorEnum_Values() []string { } } +const ( + // EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnumOn is a EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum enum value + EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnumOn = "on" + + // EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnumOff is a EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum enum value + EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnumOff = "off" +) + +// EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum_Values returns all elements of the EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum enum +func EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum_Values() []string { + return []string{ + EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnumOn, + EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnumOff, + } +} + const ( // IpAddressTypeIpv4 is a IpAddressType enum value IpAddressTypeIpv4 = "ipv4" diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go index ed55d8224d..55c72ad686 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -1728,13 +1728,6 @@ func (c *Route53) CreateTrafficPolicyInstanceRequest(input *CreateTrafficPolicyI // responds to DNS queries for the domain or subdomain name by using the resource // record sets that CreateTrafficPolicyInstance created. // -// After you submit an CreateTrafficPolicyInstance request, there's a brief -// delay while Amazon Route 53 creates the resource record sets that are specified -// in the traffic policy definition. Use GetTrafficPolicyInstance with the id -// of new traffic policy instance to confirm that the CreateTrafficPolicyInstance -// request completed successfully. For more information, see the State response -// element. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4787,10 +4780,10 @@ func (c *Route53) GetTrafficPolicyInstanceRequest(input *GetTrafficPolicyInstanc // // Gets information about a specified traffic policy instance. // -// Use GetTrafficPolicyInstance with the id of new traffic policy instance to -// confirm that the CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance -// request completed successfully. For more information, see the State response -// element. +// After you submit a CreateTrafficPolicyInstance or an UpdateTrafficPolicyInstance +// request, there's a brief delay while Amazon Route 53 creates the resource +// record sets that are specified in the traffic policy definition. For more +// information, see the State response element. // // In the Route 53 console, traffic policy instances are known as policy records. // @@ -7512,12 +7505,6 @@ func (c *Route53) UpdateTrafficPolicyInstanceRequest(input *UpdateTrafficPolicyI // UpdateTrafficPolicyInstance API operation for Amazon Route 53. // -// After you submit a UpdateTrafficPolicyInstance request, there's a brief delay -// while Route 53 creates the resource record sets that are specified in the -// traffic policy definition. Use GetTrafficPolicyInstance with the id of updated -// traffic policy instance confirm that the UpdateTrafficPolicyInstance request -// completed successfully. For more information, see the State response element. 
-// // Updates the resource record sets in a specified hosted zone that were created // based on the settings in a specified traffic policy version. // @@ -20450,6 +20437,9 @@ const ( // CloudWatchRegionApSoutheast4 is a CloudWatchRegion enum value CloudWatchRegionApSoutheast4 = "ap-southeast-4" + + // CloudWatchRegionIlCentral1 is a CloudWatchRegion enum value + CloudWatchRegionIlCentral1 = "il-central-1" ) // CloudWatchRegion_Values returns all elements of the CloudWatchRegion enum @@ -20489,6 +20479,7 @@ func CloudWatchRegion_Values() []string { CloudWatchRegionUsIsoWest1, CloudWatchRegionUsIsobEast1, CloudWatchRegionApSoutheast4, + CloudWatchRegionIlCentral1, } } @@ -20819,6 +20810,9 @@ const ( // ResourceRecordSetRegionApSoutheast4 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionApSoutheast4 = "ap-southeast-4" + + // ResourceRecordSetRegionIlCentral1 is a ResourceRecordSetRegion enum value + ResourceRecordSetRegionIlCentral1 = "il-central-1" ) // ResourceRecordSetRegion_Values returns all elements of the ResourceRecordSetRegion enum @@ -20853,6 +20847,7 @@ func ResourceRecordSetRegion_Values() []string { ResourceRecordSetRegionEuSouth1, ResourceRecordSetRegionEuSouth2, ResourceRecordSetRegionApSoutheast4, + ResourceRecordSetRegionIlCentral1, } } @@ -21011,6 +21006,9 @@ const ( // VPCRegionApSoutheast4 is a VPCRegion enum value VPCRegionApSoutheast4 = "ap-southeast-4" + + // VPCRegionIlCentral1 is a VPCRegion enum value + VPCRegionIlCentral1 = "il-central-1" ) // VPCRegion_Values returns all elements of the VPCRegion enum @@ -21049,5 +21047,6 @@ func VPCRegion_Values() []string { VPCRegionEuSouth1, VPCRegionEuSouth2, VPCRegionApSoutheast4, + VPCRegionIlCentral1, } } diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md index fb90e99e13..184ee6905c 100644 --- a/vendor/github.com/digitalocean/godo/CHANGELOG.md +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -1,5 +1,9 @@ # Change Log +## [v1.101.0] - 2023-08-09 + +- #619 - @danaelhe - Add retryablehttp Client Option + ## [v1.100.0] - 2023-07-20 - #618 - @asaha - load balancers: introduce new type field diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index b858f67e4d..3dcfa54e26 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -16,12 +16,13 @@ import ( "time" "github.com/google/go-querystring/query" + "github.com/hashicorp/go-retryablehttp" "golang.org/x/oauth2" "golang.org/x/time/rate" ) const ( - libraryVersion = "1.100.0" + libraryVersion = "1.101.0" defaultBaseURL = "https://api.digitalocean.com/" userAgent = "godo/" + libraryVersion mediaType = "application/json" @@ -92,6 +93,29 @@ type Client struct { // Optional rate limiter to ensure QoS. rateLimiter *rate.Limiter + + // Optional retry values. Setting the RetryConfig.RetryMax value enables automatically retrying requests + // that fail with 429 or 500-level response codes using the go-retryablehttp client + RetryConfig RetryConfig +} + +// RetryConfig sets the values used for enabling retries and backoffs for +// requests that fail with 429 or 500-level response codes using the go-retryablehttp client. +// RetryConfig.RetryMax must be configured to enable this behavior. RetryConfig.RetryWaitMin and +// RetryConfig.RetryWaitMax are optional, with the default values being 1.0 and 30.0, respectively. 
+// +// You can use +// +// godo.PtrTo(1.0) +// +// to explicitly set the RetryWaitMin and RetryWaitMax values. +// +// Note: Opting to use the go-retryablehttp client will overwrite any custom HTTP client passed into New(). +// Only the custom HTTP client's custom transport and timeout will be maintained. +type RetryConfig struct { + RetryMax int + RetryWaitMin *float64 // Minimum time to wait + RetryWaitMax *float64 // Maximum time to wait } // RequestCompletionCallback defines the type of the request callback function @@ -271,6 +295,33 @@ func New(httpClient *http.Client, opts ...ClientOpt) (*Client, error) { } } + // if retryMax is set it will use the retryablehttp client. + if c.RetryConfig.RetryMax > 0 { + retryableClient := retryablehttp.NewClient() + retryableClient.RetryMax = c.RetryConfig.RetryMax + + if c.RetryConfig.RetryWaitMin != nil { + retryableClient.RetryWaitMin = time.Duration(*c.RetryConfig.RetryWaitMin * float64(time.Second)) + } + if c.RetryConfig.RetryWaitMax != nil { + retryableClient.RetryWaitMax = time.Duration(*c.RetryConfig.RetryWaitMax * float64(time.Second)) + } + + // if timeout is set, it is maintained before overwriting client with StandardClient() + retryableClient.HTTPClient.Timeout = c.client.Timeout + + var source *oauth2.Transport + if _, ok := c.client.Transport.(*oauth2.Transport); ok { + source = c.client.Transport.(*oauth2.Transport) + } + c.client = retryableClient.StandardClient() + c.client.Transport = &oauth2.Transport{ + Base: c.client.Transport, + Source: source.Source, + } + + } + return c, nil } @@ -315,6 +366,17 @@ func SetStaticRateLimit(rps float64) ClientOpt { } } +// WithRetryAndBackoffs sets retry values. Setting the RetryConfig.RetryMax value enables automatically retrying requests +// that fail with 429 or 500-level response codes using the go-retryablehttp client +func WithRetryAndBackoffs(retryConfig RetryConfig) ClientOpt { + return func(c *Client) error { + c.RetryConfig.RetryMax = retryConfig.RetryMax + c.RetryConfig.RetryWaitMax = retryConfig.RetryWaitMax + c.RetryConfig.RetryWaitMin = retryConfig.RetryWaitMin + return nil + } +} + // NewRequest creates an API request. A relative URL can be provided in urlStr, which will be resolved to the // BaseURL of the Client. Relative URLS should always be specified without a preceding slash. If specified, the // value pointed to by body is JSON encoded and included in as the request body. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 0000000000..036e5313fc --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 0000000000..fe28d15b6f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,58 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. 
Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + ForceAttemptHTTP2: true, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 0000000000..05841092a7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. This can have detrimental effects, esepcially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues. This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors. If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. +// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 0000000000..3c845dc0dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,48 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. 
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r != nil { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + if next != nil { + next.ServeHTTP(w, r) + } + } + + return + }) +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore new file mode 100644 index 0000000000..4e309e0b32 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore @@ -0,0 +1,4 @@ +.idea/ +*.iml +*.test +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md new file mode 100644 index 0000000000..33686e4da8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md @@ -0,0 +1,9 @@ +## 0.7.4 (Jun 6, 2023) + +BUG FIXES + +- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 [GH-194] + +## 0.7.3 (May 15, 2023) + +Initial release diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS new file mode 100644 index 0000000000..f8389c995e --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS @@ -0,0 +1 @@ +* @hashicorp/release-engineering \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 0000000000..f4f97ee585 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. 
"Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile new file mode 100644 index 0000000000..da17640e64 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile @@ -0,0 +1,11 @@ +default: test + +test: + go vet ./... + go test -race ./... + +updatedeps: + go get -f -t -u ./... + go get -f -u ./... + +.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md new file mode 100644 index 0000000000..8943becf19 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -0,0 +1,62 @@ +go-retryablehttp +================ + +[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/hashicorp/go-retryablehttp +[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp + +The `retryablehttp` package provides a familiar HTTP client interface with +automatic retries and exponential backoff. It is a thin wrapper over the +standard `net/http` client library and exposes nearly the same public API. This +makes `retryablehttp` very easy to drop into existing programs. + +`retryablehttp` performs automatic retries under certain conditions. Mainly, if +an error is returned by the client (connection errors, etc.), or if a 500-range +response code is received (except 501), then a retry is invoked after a wait +period. Otherwise, the response is returned and left to the caller to +interpret. + +The main difference from `net/http` is that requests which take a request body +(POST/PUT et. al) can have the body provided in a number of ways (some more or +less efficient) that allow "rewinding" the request body if the initial request +fails so that the full request can be attempted again. See the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more +details. + +Version 0.6.0 and before are compatible with Go prior to 1.12. From 0.6.1 onward, Go 1.12+ is required. +From 0.6.7 onward, Go 1.13+ is required. + +Example Use +=========== + +Using this library should look almost identical to what you would do with +`net/http`. The most simple example of a GET request is shown below: + +```go +resp, err := retryablehttp.Get("/foo") +if err != nil { + panic(err) +} +``` + +The returned response object is an `*http.Response`, the same thing you would +usually get from `net/http`. Had the request failed one or more times, the above +call would block and retry with exponential backoff. + +## Getting a stdlib `*http.Client` with retries + +It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`. +This makes use of retryablehttp broadly applicable with minimal effort. Simply +configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`: + +```go +retryClient := retryablehttp.NewClient() +retryClient.RetryMax = 10 + +standardClient := retryClient.StandardClient() // *http.Client +``` + +For more usage and examples see the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). 
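The godo change earlier in this diff builds on exactly this pattern: WithRetryAndBackoffs constructs a retryablehttp client internally and swaps it in via StandardClient(). A minimal sketch of opting in from godo, assuming a placeholder access token and illustrative retry values, might look like:

```go
package main

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

func main() {
	// "DO_TOKEN" is a placeholder; use a real personal access token.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "DO_TOKEN"})
	oauthClient := oauth2.NewClient(context.Background(), ts)

	// Retries are enabled only when RetryMax > 0; the wait values are in seconds.
	retryConfig := godo.RetryConfig{
		RetryMax:     4,
		RetryWaitMin: godo.PtrTo(1.0),
		RetryWaitMax: godo.PtrTo(30.0),
	}

	client, err := godo.New(oauthClient, godo.WithRetryAndBackoffs(retryConfig))
	if err != nil {
		log.Fatal(err)
	}

	// Requests now retry on 429 and 5xx responses with exponential backoff.
	account, _, err := client.Account.Get(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println(account.Email)
}
```

Leaving RetryWaitMin and RetryWaitMax nil falls back to the documented defaults of 1.0 and 30.0 seconds.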
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go new file mode 100644 index 0000000000..cad96bd97b --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -0,0 +1,832 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package retryablehttp provides a familiar HTTP client interface with +// automatic retries and exponential backoff. It is a thin wrapper over the +// standard net/http client library and exposes nearly the same public API. +// This makes retryablehttp very easy to drop into existing programs. +// +// retryablehttp performs automatic retries under certain conditions. Mainly, if +// an error is returned by the client (connection errors etc), or if a 500-range +// response is received, then a retry is invoked. Otherwise, the response is +// returned and left to the caller to interpret. +// +// Requests which take a request body should provide a non-nil function +// parameter. The best choice is to provide either a function satisfying +// ReaderFunc which provides multiple io.Readers in an efficient manner, a +// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte +// slice. As it is a reference type, and we will wrap it as needed by readers, +// we can efficiently re-use the request body without needing to copy it. If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. +// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so should be avoided if possible. +package retryablehttp + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "math/rand" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "sync" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +var ( + // Default retry configuration + defaultRetryWaitMin = 1 * time.Second + defaultRetryWaitMax = 30 * time.Second + defaultRetryMax = 4 + + // defaultLogger is the logger provided with defaultClient + defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + + // defaultClient is used for performing requests without explicitly making + // a new client. It is purposely private to avoid modifications. + defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) + + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) + + // A regular expression to match the error returned by net/http when the + // TLS certificate is not trusted. This error isn't typed + // specifically so we resort to matching on the error string. 
+ notTrustedErrorRe = regexp.MustCompile(`certificate is not trusted`) +) + +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + +// ResponseHandlerFunc is a type of function that takes in a Response, and does something with it. +// The ResponseHandlerFunc is called when the HTTP client successfully receives a response and the +// CheckRetry function indicates that a retry of the base request is not necessary. +// If an error is returned from this function, the CheckRetry policy will be used to determine +// whether to retry the whole request (including this handler). +// +// Make sure to check status codes! Even if the request was completed it may have a non-2xx status code. +// +// The response body is not automatically closed. It must be closed either by the ResponseHandlerFunc or +// by the caller out-of-band. Failure to do so will result in a memory leak. +type ResponseHandlerFunc func(*http.Response) error + +// LenReader is an interface implemented by many in-memory io.Reader's. Used +// for automatically sending the right Content-Length header when possible. +type LenReader interface { + Len() int +} + +// Request wraps the metadata needed to create HTTP requests. +type Request struct { + // body is a seekable reader over the request body payload. This is + // used to rewind the request data in between retries. + body ReaderFunc + + responseHandler ResponseHandlerFunc + + // Embed an HTTP request directly. This makes a *Request act exactly + // like an *http.Request so that all meta methods are supported. + *http.Request +} + +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + return &Request{ + body: r.body, + responseHandler: r.responseHandler, + Request: r.Request.WithContext(ctx), + } +} + +// SetResponseHandler allows setting the response handler. +func (r *Request) SetResponseHandler(fn ResponseHandlerFunc) { + r.responseHandler = fn +} + +// BodyBytes allows accessing the request body. It is an analogue to +// http.Request's Body variable, but it returns a copy of the underlying data +// rather than consuming it. +// +// This function is not thread-safe; do not call it at the same time as another +// call, or at the same time this request is being used with Client.Do. +func (r *Request) BodyBytes() ([]byte, error) { + if r.body == nil { + return nil, nil + } + body, err := r.body() + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(body) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. +func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. 
+func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + +func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { + var bodyReader ReaderFunc + var contentLength int64 + + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. + case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. + case *bytes.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := body + bodyReader = func() (io.Reader, error) { + _, err := raw.Seek(0, 0) + return ioutil.NopCloser(raw), err + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } + + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + if len(buf) == 0 { + bodyReader = func() (io.Reader, error) { + return http.NoBody, nil + } + contentLength = 0 + } else { + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + } + + // No body provided, nothing to do + case nil: + + // Unrecognized type + default: + return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) + } + return bodyReader, contentLength, nil +} + +// FromRequest wraps an http.Request in a retryablehttp.Request +func FromRequest(r *http.Request) (*Request, error) { + bodyReader, _, err := getBodyReaderAndContentLength(r.Body) + if err != nil { + return nil, err + } + // Could assert contentLength == r.ContentLength + return &Request{body: bodyReader, Request: r}, nil +} + +// NewRequest creates a new wrapped request. +func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + return NewRequestWithContext(context.Background(), method, url, rawBody) +} + +// NewRequestWithContext creates a new wrapped request with the provided context. +// +// The context controls the entire lifetime of a request and its response: +// obtaining a connection, sending the request, and reading the response headers and body. 
+func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return nil, err + } + + httpReq, err := http.NewRequestWithContext(ctx, method, url, nil) + if err != nil { + return nil, err + } + httpReq.ContentLength = contentLength + + return &Request{body: bodyReader, Request: httpReq}, nil +} + +// Logger interface allows to use other loggers than +// standard log.Logger. +type Logger interface { + Printf(string, ...interface{}) +} + +// LeveledLogger is an interface that can be implemented by any logger or a +// logger wrapper to provide leveled logging. The methods accept a message +// string and a variadic number of key-value pairs. For log.Printf style +// formatting where message string contains a format specifier, use Logger +// interface. +type LeveledLogger interface { + Error(msg string, keysAndValues ...interface{}) + Info(msg string, keysAndValues ...interface{}) + Debug(msg string, keysAndValues ...interface{}) + Warn(msg string, keysAndValues ...interface{}) +} + +// hookLogger adapts an LeveledLogger to Logger for use by the existing hook functions +// without changing the API. +type hookLogger struct { + LeveledLogger +} + +func (h hookLogger) Printf(s string, args ...interface{}) { + h.Info(fmt.Sprintf(s, args...)) +} + +// RequestLogHook allows a function to run before each retry. The HTTP +// request which will be made, and the retry number (0 for the initial +// request) are available to users. The internal logger is exposed to +// consumers. +type RequestLogHook func(Logger, *http.Request, int) + +// ResponseLogHook is like RequestLogHook, but allows running a function +// on each HTTP response. This function will be invoked at the end of +// every HTTP request executed, regardless of whether a subsequent retry +// needs to be performed or not. If the response body is read or closed +// from this method, this will affect the response returned from Do(). +type ResponseLogHook func(Logger, *http.Response) + +// CheckRetry specifies a policy for handling retries. It is called +// following each request with the response and error values returned by +// the http.Client. If CheckRetry returns false, the Client stops retrying +// and returns the response to the caller. If CheckRetry returns an error, +// that error value is returned in lieu of the error from the request. The +// Client will close any response body when retrying, but if the retry is +// aborted it is up to the CheckRetry callback to properly close any +// response body before returning. +type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) + +// Backoff specifies a policy for how long to wait between retries. +// It is called after a failing request to determine the amount of time +// that should pass before trying again. +type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration + +// ErrorHandler is called if retries are expired, containing the last status +// from the http library. If not specified, default behavior for the library is +// to close the body and return an error indicating how many tries were +// attempted. If overriding this, be sure to close the body if needed. +type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) + +// Client is used to make HTTP requests. 
It adds additional functionality +// like automatic retries to tolerate minor outages. +type Client struct { + HTTPClient *http.Client // Internal HTTP client. + Logger interface{} // Customer logger instance. Can be either Logger or LeveledLogger + + RetryWaitMin time.Duration // Minimum time to wait + RetryWaitMax time.Duration // Maximum time to wait + RetryMax int // Maximum number of retries + + // RequestLogHook allows a user-supplied function to be called + // before each retry. + RequestLogHook RequestLogHook + + // ResponseLogHook allows a user-supplied function to be called + // with the response from each HTTP request executed. + ResponseLogHook ResponseLogHook + + // CheckRetry specifies the policy for handling retries, and is called + // after each request. The default policy is DefaultRetryPolicy. + CheckRetry CheckRetry + + // Backoff specifies the policy for how long to wait between retries + Backoff Backoff + + // ErrorHandler specifies the custom error handler to use, if any + ErrorHandler ErrorHandler + + loggerInit sync.Once + clientInit sync.Once +} + +// NewClient creates a new Client with default settings. +func NewClient() *Client { + return &Client{ + HTTPClient: cleanhttp.DefaultPooledClient(), + Logger: defaultLogger, + RetryWaitMin: defaultRetryWaitMin, + RetryWaitMax: defaultRetryWaitMax, + RetryMax: defaultRetryMax, + CheckRetry: DefaultRetryPolicy, + Backoff: DefaultBackoff, + } +} + +func (c *Client) logger() interface{} { + c.loggerInit.Do(func() { + if c.Logger == nil { + return + } + + switch c.Logger.(type) { + case Logger, LeveledLogger: + // ok + default: + // This should happen in dev when they are setting Logger and work on code, not in prod. + panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger)) + } + }) + + return c.Logger +} + +// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which +// will retry on connection errors and server errors. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + // don't propagate other errors + shouldRetry, _ := baseRetryPolicy(resp, err) + return shouldRetry, nil +} + +// ErrorPropagatedRetryPolicy is the same as DefaultRetryPolicy, except it +// propagates errors back instead of returning nil. This allows you to inspect +// why it decided to retry or not. +func ErrorPropagatedRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + return baseRetryPolicy(resp, err) +} + +func baseRetryPolicy(resp *http.Response, err error) (bool, error) { + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to an invalid protocol scheme. + if schemeErrorRe.MatchString(v.Error()) { + return false, v + } + + // Don't retry if the error was due to TLS cert verification failure. + if notTrustedErrorRe.MatchString(v.Error()) { + return false, v + } + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, v + } + } + + // The error is likely recoverable so retry. + return true, nil + } + + // 429 Too Many Requests is recoverable. 
Sometimes the server puts + // a Retry-After response header to indicate when the server is + // available to start processing request from client. + if resp.StatusCode == http.StatusTooManyRequests { + return true, nil + } + + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != http.StatusNotImplemented) { + return true, fmt.Errorf("unexpected HTTP status %s", resp.Status) + } + + return false, nil +} + +// DefaultBackoff provides a default callback for Client.Backoff which +// will perform exponential backoff based on the attempt number and limited +// by the provided minimum and maximum durations. +// +// It also tries to parse Retry-After response header when a http.StatusTooManyRequests +// (HTTP Code 429) is found in the resp parameter. Hence it will return the number of +// seconds the server states it may be ready to process more requests from this client. +func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + if resp != nil { + if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable { + if s, ok := resp.Header["Retry-After"]; ok { + if sleep, err := strconv.ParseInt(s[0], 10, 64); err == nil { + return time.Second * time.Duration(sleep) + } + } + } + } + + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multiplied by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) +// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) +func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. 
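DefaultBackoff and LinearJitterBackoff are both drop-in values for Client.Backoff; the choice comes down to whether Retry-After handling or jitter matters more for the workload. A hedged configuration sketch follows; the limits and URL are arbitrary examples, not recommended values.

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	client := retryablehttp.NewClient()

	// Tune the retry budget; the defaults come from NewClient.
	client.RetryMax = 5
	client.RetryWaitMin = 500 * time.Millisecond
	client.RetryWaitMax = 10 * time.Second

	// Spread retries out with jitter instead of pure exponential backoff,
	// and propagate the reason a retry was (or was not) attempted.
	client.Backoff = retryablehttp.LinearJitterBackoff
	client.CheckRetry = retryablehttp.ErrorPropagatedRetryPolicy

	resp, err := client.Get("https://example.com/health")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```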
+ jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + +// Do wraps calling an HTTP method with retries. +func (c *Client) Do(req *Request) (*http.Response, error) { + c.clientInit.Do(func() { + if c.HTTPClient == nil { + c.HTTPClient = cleanhttp.DefaultPooledClient() + } + }) + + logger := c.logger() + + if logger != nil { + switch v := logger.(type) { + case LeveledLogger: + v.Debug("performing request", "method", req.Method, "url", req.URL) + case Logger: + v.Printf("[DEBUG] %s %s", req.Method, req.URL) + } + } + + var resp *http.Response + var attempt int + var shouldRetry bool + var doErr, respErr, checkErr error + + for i := 0; ; i++ { + doErr, respErr = nil, nil + attempt++ + + // Always rewind the request body when non-nil. + if req.body != nil { + body, err := req.body() + if err != nil { + c.HTTPClient.CloseIdleConnections() + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Body = c + } else { + req.Body = ioutil.NopCloser(body) + } + } + + if c.RequestLogHook != nil { + switch v := logger.(type) { + case LeveledLogger: + c.RequestLogHook(hookLogger{v}, req.Request, i) + case Logger: + c.RequestLogHook(v, req.Request, i) + default: + c.RequestLogHook(nil, req.Request, i) + } + } + + // Attempt the request + resp, doErr = c.HTTPClient.Do(req.Request) + + // Check if we should continue with retries. + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, doErr) + if !shouldRetry && doErr == nil && req.responseHandler != nil { + respErr = req.responseHandler(resp) + shouldRetry, checkErr = c.CheckRetry(req.Context(), resp, respErr) + } + + err := doErr + if respErr != nil { + err = respErr + } + if err != nil { + switch v := logger.(type) { + case LeveledLogger: + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) + case Logger: + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + switch v := logger.(type) { + case LeveledLogger: + c.ResponseLogHook(hookLogger{v}, resp) + case Logger: + c.ResponseLogHook(v, resp) + default: + c.ResponseLogHook(nil, resp) + } + } + } + + if !shouldRetry { + break + } + + // We do this before drainBody because there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + + // We're going to retry, consume any response to reuse the connection. 
+ if doErr == nil { + c.drainBody(resp.Body) + } + + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) + if logger != nil { + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if resp != nil { + desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode) + } + switch v := logger.(type) { + case LeveledLogger: + v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) + case Logger: + v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + } + } + timer := time.NewTimer(wait) + select { + case <-req.Context().Done(): + timer.Stop() + c.HTTPClient.CloseIdleConnections() + return nil, req.Context().Err() + case <-timer.C: + } + + // Make shallow copy of http Request so that we can modify its body + // without racing against the closeBody call in persistConn.writeLoop. + httpreq := *req.Request + req.Request = &httpreq + } + + // this is the closest we have to success criteria + if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry { + return resp, nil + } + + defer c.HTTPClient.CloseIdleConnections() + + var err error + if checkErr != nil { + err = checkErr + } else if respErr != nil { + err = respErr + } else { + err = doErr + } + + if c.ErrorHandler != nil { + return c.ErrorHandler(resp, err, attempt) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + c.drainBody(resp.Body) + } + + // this means CheckRetry thought the request was a failure, but didn't + // communicate why + if err == nil { + return nil, fmt.Errorf("%s %s giving up after %d attempt(s)", + req.Method, req.URL, attempt) + } + + return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w", + req.Method, req.URL, attempt, err) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + if c.logger() != nil { + switch v := c.logger().(type) { + case LeveledLogger: + v.Error("error reading response body", "error", err) + case Logger: + v.Printf("[ERR] error reading response body: %v", err) + } + } + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body interface{}) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. 
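Do wires RequestLogHook, ResponseLogHook and ErrorHandler into the retry loop shown above. A brief usage sketch, with hypothetical log messages and a placeholder URL:

```go
package main

import (
	"log"
	"net/http"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	client := retryablehttp.NewClient()

	// Log every attempt (0 is the initial request) and every response.
	client.RequestLogHook = func(_ retryablehttp.Logger, req *http.Request, attempt int) {
		log.Printf("attempt %d: %s %s", attempt, req.Method, req.URL)
	}
	client.ResponseLogHook = func(_ retryablehttp.Logger, resp *http.Response) {
		log.Printf("got %d from %s", resp.StatusCode, resp.Request.URL)
	}

	// Hand back whatever the final attempt produced instead of the
	// "giving up after N attempts" error.
	client.ErrorHandler = retryablehttp.PassthroughErrorHandler

	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```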
+func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. +func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go new file mode 100644 index 0000000000..8c407adb3b --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -0,0 +1,55 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package retryablehttp + +import ( + "errors" + "net/http" + "net/url" + "sync" +) + +// RoundTripper implements the http.RoundTripper interface, using a retrying +// HTTP client to execute requests. +// +// It is important to note that retryablehttp doesn't always act exactly as a +// RoundTripper should. This is highly dependent on the retryable client's +// configuration. +type RoundTripper struct { + // The client to use during requests. If nil, the default retryablehttp + // client and settings will be used. + Client *Client + + // once ensures that the logic to initialize the default client runs at + // most once, in a single thread. + once sync.Once +} + +// init initializes the underlying retryable client. +func (rt *RoundTripper) init() { + if rt.Client == nil { + rt.Client = NewClient() + } +} + +// RoundTrip satisfies the http.RoundTripper interface. +func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.once.Do(rt.init) + + // Convert the request to be retryable. + retryableReq, err := FromRequest(req) + if err != nil { + return nil, err + } + + // Execute the request. + resp, err := rt.Client.Do(retryableReq) + // If we got an error returned by standard library's `Do` method, unwrap it + // otherwise we will wind up erroneously re-nesting the error. + if _, ok := err.(*url.Error); ok { + return resp, errors.Unwrap(err) + } + + return resp, err +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go index dee441d8e8..3131266f70 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/action.go @@ -145,7 +145,7 @@ func (c *ActionClient) All(ctx context.Context) ([]*Action, error) { // AllWithOpts returns all actions for the given options. 
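The RoundTripper and StandardClient shown above are the bridge for code that only accepts a plain *http.Client: the retrying client is shimmed in as the transport. A minimal sketch, assuming a placeholder URL:

```go
package main

import (
	"log"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	retryClient := retryablehttp.NewClient()
	retryClient.RetryMax = 3

	// StandardClient returns a stdlib *http.Client whose Transport is a
	// retryablehttp.RoundTripper, so existing code gains retries unchanged.
	httpClient := retryClient.StandardClient()

	resp, err := httpClient.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```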
func (c *ActionClient) AllWithOpts(ctx context.Context, opts ActionListOpts) ([]*Action, error) { - var allActions []*Action + allActions := []*Action{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page @@ -216,6 +216,13 @@ func (c *ActionClient) WatchOverallProgress(ctx context.Context, actions []*Acti errCh <- err return } + if len(as) == 0 { + // No actions returned for the provided IDs, they do not exist in the API. + // We need to catch and fail early for this, otherwise the loop will continue + // indefinitely. + errCh <- fmt.Errorf("failed to wait for actions: remaining actions (%v) are not returned from API", opts.ID) + return + } for _, a := range as { switch a.Status { @@ -282,6 +289,10 @@ func (c *ActionClient) WatchProgress(ctx context.Context, action *Action) (<-cha errCh <- err return } + if a == nil { + errCh <- fmt.Errorf("failed to wait for action %d: action not returned from API", action.ID) + return + } switch a.Status { case ActionStatusRunning: diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go index 050418f280..8c3cea4496 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/certificate.go @@ -182,7 +182,7 @@ func (c *CertificateClient) All(ctx context.Context) ([]*Certificate, error) { // AllWithOpts returns all Certificates for the given options. func (c *CertificateClient) AllWithOpts(ctx context.Context, opts CertificateListOpts) ([]*Certificate, error) { - var allCertificates []*Certificate + allCertificates := []*Certificate{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go index c32072c9c0..4c30de3b9d 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/datacenter.go @@ -116,7 +116,7 @@ func (c *DatacenterClient) All(ctx context.Context) ([]*Datacenter, error) { // AllWithOpts returns all datacenters for the given options. func (c *DatacenterClient) AllWithOpts(ctx context.Context, opts DatacenterListOpts) ([]*Datacenter, error) { - var allDatacenters []*Datacenter + allDatacenters := []*Datacenter{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go index 5ccb14349d..811d727a6d 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/firewall.go @@ -180,7 +180,7 @@ func (c *FirewallClient) All(ctx context.Context) ([]*Firewall, error) { // AllWithOpts returns all Firewalls for the given options. 
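The recurring change in these AllWithOpts methods, from `var allX []*X` to `allX := []*X{}`, swaps a nil slice for an empty one when the API returns no results. Ranging over either behaves the same, but they encode differently, which matters to callers that serialize the result. A standalone illustration of the difference (not code from this patch):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []int    // marshals as JSON null
	emptySlice := []int{} // marshals as JSON []

	a, _ := json.Marshal(nilSlice)
	b, _ := json.Marshal(emptySlice)
	fmt.Println(string(a), string(b)) // prints: null []
}
```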
func (c *FirewallClient) AllWithOpts(ctx context.Context, opts FirewallListOpts) ([]*Firewall, error) { - var allFirewalls []*Firewall + allFirewalls := []*Firewall{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go index 493ade254a..9123001d25 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/floating_ip.go @@ -181,7 +181,7 @@ func (c *FloatingIPClient) All(ctx context.Context) ([]*FloatingIP, error) { // AllWithOpts returns all Floating IPs for the given options. func (c *FloatingIPClient) AllWithOpts(ctx context.Context, opts FloatingIPListOpts) ([]*FloatingIP, error) { - var allFloatingIPs []*FloatingIP + allFloatingIPs := []*FloatingIP{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go index 4acbfccf1b..2571a4e557 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go @@ -2,4 +2,4 @@ package hcloud // Version is the library's version following Semantic Versioning. -const Version = "1.48.0" // x-release-please-version +const Version = "1.49.0" // x-release-please-version diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go index 69a7165ba8..aa57c7107c 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation/metrics.go @@ -23,29 +23,36 @@ func New(subsystemIdentifier string, instrumentationRegistry *prometheus.Registr // InstrumentedRoundTripper returns an instrumented round tripper. 
func (i *Instrumenter) InstrumentedRoundTripper() http.RoundTripper { - inFlightRequestsGauge := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: fmt.Sprintf("hcloud_%s_in_flight_requests", i.subsystemIdentifier), - Help: fmt.Sprintf("A gauge of in-flight requests to the hcloud %s.", i.subsystemIdentifier), - }) - - requestsPerEndpointCounter := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: fmt.Sprintf("hcloud_%s_requests_total", i.subsystemIdentifier), - Help: fmt.Sprintf("A counter for requests to the hcloud %s per endpoint.", i.subsystemIdentifier), - }, - []string{"code", "method", "api_endpoint"}, + inFlightRequestsGauge := registerOrReuse( + i.instrumentationRegistry, + prometheus.NewGauge(prometheus.GaugeOpts{ + Name: fmt.Sprintf("hcloud_%s_in_flight_requests", i.subsystemIdentifier), + Help: fmt.Sprintf("A gauge of in-flight requests to the hcloud %s.", i.subsystemIdentifier), + }), ) - requestLatencyHistogram := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: fmt.Sprintf("hcloud_%s_request_duration_seconds", i.subsystemIdentifier), - Help: fmt.Sprintf("A histogram of request latencies to the hcloud %s .", i.subsystemIdentifier), - Buckets: prometheus.DefBuckets, - }, - []string{"method"}, + requestsPerEndpointCounter := registerOrReuse( + i.instrumentationRegistry, + prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: fmt.Sprintf("hcloud_%s_requests_total", i.subsystemIdentifier), + Help: fmt.Sprintf("A counter for requests to the hcloud %s per endpoint.", i.subsystemIdentifier), + }, + []string{"code", "method", "api_endpoint"}, + ), ) - i.instrumentationRegistry.MustRegister(requestsPerEndpointCounter, requestLatencyHistogram, inFlightRequestsGauge) + requestLatencyHistogram := registerOrReuse( + i.instrumentationRegistry, + prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: fmt.Sprintf("hcloud_%s_request_duration_seconds", i.subsystemIdentifier), + Help: fmt.Sprintf("A histogram of request latencies to the hcloud %s .", i.subsystemIdentifier), + Buckets: prometheus.DefBuckets, + }, + []string{"method"}, + ), + ) return promhttp.InstrumentRoundTripperInFlight(inFlightRequestsGauge, promhttp.InstrumentRoundTripperDuration(requestLatencyHistogram, @@ -74,6 +81,27 @@ func (i *Instrumenter) instrumentRoundTripperEndpoint(counter *prometheus.Counte } } +// registerOrReuse will try to register the passed Collector, but in case a conflicting collector was already registered, +// it will instead return that collector. Make sure to always use the collector return by this method. +// Similar to [Registry.MustRegister] it will panic if any other error occurs. 
+func registerOrReuse[C prometheus.Collector](registry *prometheus.Registry, collector C) C { + err := registry.Register(collector) + if err != nil { + // If we get a AlreadyRegisteredError we can return the existing collector + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + if existingCollector, ok := are.ExistingCollector.(C); ok { + collector = existingCollector + } else { + panic("received incompatible existing collector") + } + } else { + panic(err) + } + } + + return collector +} + func preparePathForLabel(path string) string { path = strings.ToLower(path) diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go index 3b7dcb4703..a858620b97 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/iso.go @@ -143,7 +143,7 @@ func (c *ISOClient) All(ctx context.Context) ([]*ISO, error) { // AllWithOpts returns all ISOs for the given options. func (c *ISOClient) AllWithOpts(ctx context.Context, opts ISOListOpts) ([]*ISO, error) { - allISOs := make([]*ISO, 0) + allISOs := []*ISO{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go index 5db651ade0..d069836c88 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer.go @@ -327,7 +327,7 @@ func (c *LoadBalancerClient) All(ctx context.Context) ([]*LoadBalancer, error) { // AllWithOpts returns all Load Balancers for the given options. func (c *LoadBalancerClient) AllWithOpts(ctx context.Context, opts LoadBalancerListOpts) ([]*LoadBalancer, error) { - var allLoadBalancers []*LoadBalancer + allLoadBalancers := []*LoadBalancer{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go index a550df2267..9a32e87d02 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/load_balancer_type.go @@ -113,7 +113,7 @@ func (c *LoadBalancerTypeClient) All(ctx context.Context) ([]*LoadBalancerType, // AllWithOpts returns all Load Balancer types for the given options. func (c *LoadBalancerTypeClient) AllWithOpts(ctx context.Context, opts LoadBalancerTypeListOpts) ([]*LoadBalancerType, error) { - var allLoadBalancerTypes []*LoadBalancerType + allLoadBalancerTypes := []*LoadBalancerType{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go index 85c5bd3ac2..6b8cbec9b6 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/location.go @@ -113,7 +113,7 @@ func (c *LocationClient) All(ctx context.Context) ([]*Location, error) { // AllWithOpts returns all locations for the given options. 
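The registerOrReuse helper above makes InstrumentedRoundTripper safe to call more than once against the same Prometheus registry: a second registration attempt hands back the collector that already exists instead of panicking. The same pattern can be written without generics for a single collector type; below is a small self-contained sketch, where the metric name and the registerOrGet helper are invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// registerOrGet registers c, or returns the collector already registered
// under the same name if registration reports AlreadyRegisteredError.
func registerOrGet(reg *prometheus.Registry, c prometheus.Counter) prometheus.Counter {
	if err := reg.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			return are.ExistingCollector.(prometheus.Counter)
		}
		panic(err)
	}
	return c
}

func main() {
	reg := prometheus.NewRegistry()
	opts := prometheus.CounterOpts{Name: "demo_requests_total", Help: "demo counter"}

	first := registerOrGet(reg, prometheus.NewCounter(opts))
	second := registerOrGet(reg, prometheus.NewCounter(opts)) // reuses the first collector

	fmt.Println(first == second) // true
}
```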
func (c *LocationClient) AllWithOpts(ctx context.Context, opts LocationListOpts) ([]*Location, error) { - var allLocations []*Location + allLocations := []*Location{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/metadata/client.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/metadata/client.go index 4c173d757c..52e5876aca 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/metadata/client.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/metadata/client.go @@ -7,6 +7,7 @@ import ( "net/http" "strconv" "strings" + "time" "github.com/prometheus/client_golang/prometheus" @@ -18,46 +19,57 @@ const Endpoint = "http://169.254.169.254/hetzner/v1/metadata" // Client is a client for the Hetzner Cloud Server Metadata Endpoints. type Client struct { endpoint string + timeout time.Duration httpClient *http.Client instrumentationRegistry *prometheus.Registry } -// A ClientOption is used to configure a Client. +// A ClientOption is used to configure a [Client]. type ClientOption func(*Client) -// WithEndpoint configures a Client to use the specified Metadata API endpoint. +// WithEndpoint configures a [Client] to use the specified Metadata API endpoint. func WithEndpoint(endpoint string) ClientOption { return func(client *Client) { client.endpoint = strings.TrimRight(endpoint, "/") } } -// WithHTTPClient configures a Client to perform HTTP requests with httpClient. +// WithHTTPClient configures a [Client] to perform HTTP requests with httpClient. func WithHTTPClient(httpClient *http.Client) ClientOption { return func(client *Client) { client.httpClient = httpClient } } -// WithInstrumentation configures a Client to collect metrics about the performed HTTP requests. +// WithInstrumentation configures a [Client] to collect metrics about the performed HTTP requests. func WithInstrumentation(registry *prometheus.Registry) ClientOption { return func(client *Client) { client.instrumentationRegistry = registry } } -// NewClient creates a new client. +// WithTimeout specifies a time limit for requests made by this [Client]. Defaults to 5 seconds. +func WithTimeout(timeout time.Duration) ClientOption { + return func(client *Client) { + client.timeout = timeout + } +} + +// NewClient creates a new [Client] with the options applied. func NewClient(options ...ClientOption) *Client { client := &Client{ endpoint: Endpoint, httpClient: &http.Client{}, + timeout: 5 * time.Second, } for _, option := range options { option(client) } + client.httpClient.Timeout = client.timeout + if client.instrumentationRegistry != nil { i := instrumentation.New("metadata", client.instrumentationRegistry) client.httpClient.Transport = i.InstrumentedRoundTripper() @@ -65,8 +77,7 @@ func NewClient(options ...ClientOption) *Client { return client } -// NewRequest creates an HTTP request against the API. The returned request -// is assigned with ctx and has all necessary headers set (auth, user agent, etc.). +// get executes an HTTP request against the API. 
func (c *Client) get(path string) (string, error) { url := c.endpoint + path resp, err := c.httpClient.Get(url) diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go index 1c01be1ea1..b0b9e3a61d 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/network.go @@ -162,7 +162,7 @@ func (c *NetworkClient) All(ctx context.Context) ([]*Network, error) { // AllWithOpts returns all networks for the given options. func (c *NetworkClient) AllWithOpts(ctx context.Context, opts NetworkListOpts) ([]*Network, error) { - var allNetworks []*Network + allNetworks := []*Network{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go index 4f5a25ba19..3eb277f210 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/placement_group.go @@ -133,7 +133,7 @@ func (c *PlacementGroupClient) All(ctx context.Context) ([]*PlacementGroup, erro // AllWithOpts returns all PlacementGroups for the given options. func (c *PlacementGroupClient) AllWithOpts(ctx context.Context, opts PlacementGroupListOpts) ([]*PlacementGroup, error) { - var allPlacementGroups []*PlacementGroup + allPlacementGroups := []*PlacementGroup{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/primary_ip.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/primary_ip.go index 7e32193c34..50088e9fe5 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/primary_ip.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/primary_ip.go @@ -265,7 +265,7 @@ func (c *PrimaryIPClient) All(ctx context.Context) ([]*PrimaryIP, error) { // AllWithOpts returns all Primary IPs for the given options. func (c *PrimaryIPClient) AllWithOpts(ctx context.Context, opts PrimaryIPListOpts) ([]*PrimaryIP, error) { - var allPrimaryIPs []*PrimaryIP + allPrimaryIPs := []*PrimaryIP{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go index e4119ad23f..823f656d43 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/server_type.go @@ -140,7 +140,7 @@ func (c *ServerTypeClient) All(ctx context.Context) ([]*ServerType, error) { // AllWithOpts returns all server types for the given options. 
func (c *ServerTypeClient) AllWithOpts(ctx context.Context, opts ServerTypeListOpts) ([]*ServerType, error) { - var allServerTypes []*ServerType + allServerTypes := []*ServerType{} err := c.client.all(func(page int) (*Response, error) { opts.Page = page diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go index 2bb2d57645..758cd4ff97 100644 --- a/vendor/github.com/pkg/sftp/attrs.go +++ b/vendor/github.com/pkg/sftp/attrs.go @@ -1,7 +1,7 @@ package sftp // ssh_FXP_ATTRS support -// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 +// see https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5 import ( "os" @@ -69,6 +69,20 @@ func fileInfoFromStat(stat *FileStat, name string) os.FileInfo { } } +// FileInfoUidGid extends os.FileInfo and adds callbacks for Uid and Gid retrieval, +// as an alternative to *syscall.Stat_t objects on unix systems. +type FileInfoUidGid interface { + os.FileInfo + Uid() uint32 + Gid() uint32 +} + +// FileInfoUidGid extends os.FileInfo and adds a callbacks for extended data retrieval. +type FileInfoExtendedData interface { + os.FileInfo + Extended() []StatExtended +} + func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) { mtime := fi.ModTime().Unix() atime := mtime @@ -86,5 +100,22 @@ func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) { // os specific file stat decoding fileStatFromInfoOs(fi, &flags, fileStat) + // The call above will include the sshFileXferAttrUIDGID in case + // the os.FileInfo can be casted to *syscall.Stat_t on unix. + // If fi implements FileInfoUidGid, retrieve Uid, Gid from it instead. + if fiExt, ok := fi.(FileInfoUidGid); ok { + flags |= sshFileXferAttrUIDGID + fileStat.UID = fiExt.Uid() + fileStat.GID = fiExt.Gid() + } + + // if fi implements FileInfoExtendedData, retrieve extended data from it + if fiExt, ok := fi.(FileInfoExtendedData); ok { + fileStat.Extended = fiExt.Extended() + if len(fileStat.Extended) > 0 { + flags |= sshFileXferAttrExtended + } + } + return flags, fileStat } diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go index c01f33677e..d20348c115 100644 --- a/vendor/github.com/pkg/sftp/attrs_stubs.go +++ b/vendor/github.com/pkg/sftp/attrs_stubs.go @@ -1,3 +1,4 @@ +//go:build plan9 || windows || android // +build plan9 windows android package sftp diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go index d1f4452415..371ae9b9b8 100644 --- a/vendor/github.com/pkg/sftp/attrs_unix.go +++ b/vendor/github.com/pkg/sftp/attrs_unix.go @@ -1,3 +1,4 @@ +//go:build darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || aix || js // +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js package sftp diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go index 9e0b61645e..0df125e15d 100644 --- a/vendor/github.com/pkg/sftp/client.go +++ b/vendor/github.com/pkg/sftp/client.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "errors" + "fmt" "io" "math" "os" @@ -226,15 +227,22 @@ func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Clie if err := sftp.sendInit(); err != nil { wr.Close() - return nil, err + return nil, fmt.Errorf("error sending init packet to server: %w", err) } + if err := sftp.recvVersion(); err != nil { wr.Close() - return nil, err + return nil, fmt.Errorf("error receiving version packet from server: %w", err) } 
sftp.clientConn.wg.Add(1) - go sftp.loop() + go func() { + defer sftp.clientConn.wg.Done() + + if err := sftp.clientConn.recv(); err != nil { + sftp.clientConn.broadcastErr(err) + } + }() return sftp, nil } @@ -251,11 +259,11 @@ func (c *Client) Create(path string) (*File, error) { return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) } -const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +const sftpProtocolVersion = 3 // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt func (c *Client) sendInit() error { return c.clientConn.conn.sendPacket(&sshFxInitPacket{ - Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + Version: sftpProtocolVersion, // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt }) } @@ -267,8 +275,13 @@ func (c *Client) nextID() uint32 { func (c *Client) recvVersion() error { typ, data, err := c.recvPacket(0) if err != nil { + if err == io.EOF { + return fmt.Errorf("server unexpectedly closed connection: %w", io.ErrUnexpectedEOF) + } + return err } + if typ != sshFxpVersion { return &unexpectedPacketErr{sshFxpVersion, typ} } @@ -277,6 +290,7 @@ func (c *Client) recvVersion() error { if err != nil { return err } + if version != sftpProtocolVersion { return &unexpectedVersionErr{sftpProtocolVersion, version} } @@ -910,6 +924,45 @@ func (c *Client) MkdirAll(path string) error { return nil } +// RemoveAll delete files recursively in the directory and Recursively delete subdirectories. +// An error will be returned if no file or directory with the specified path exists +func (c *Client) RemoveAll(path string) error { + + // Get the file/directory information + fi, err := c.Stat(path) + if err != nil { + return err + } + + if fi.IsDir() { + // Delete files recursively in the directory + files, err := c.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if file.IsDir() { + // Recursively delete subdirectories + err = c.RemoveAll(path + "/" + file.Name()) + if err != nil { + return err + } + } else { + // Delete individual files + err = c.Remove(path + "/" + file.Name()) + if err != nil { + return err + } + } + } + + } + + return c.Remove(path) + +} + // File represents a remote file. type File struct { c *Client @@ -1660,7 +1713,7 @@ func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64 Handle: f.handle, Offset: uint64(off), Length: uint32(n), - Data: b, + Data: b[:n], }) select { diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go index 7d95142378..3bb2ba15f5 100644 --- a/vendor/github.com/pkg/sftp/conn.go +++ b/vendor/github.com/pkg/sftp/conn.go @@ -18,7 +18,9 @@ type conn struct { } // the orderID is used in server mode if the allocator is enabled. -// For the client mode just pass 0 +// For the client mode just pass 0. +// It returns io.EOF if the connection is closed and +// there are no more packets to read. func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) { return recvPacket(c, c.alloc, orderID) } @@ -61,14 +63,6 @@ func (c *clientConn) Close() error { return c.conn.Close() } -func (c *clientConn) loop() { - defer c.wg.Done() - err := c.recv() - if err != nil { - c.broadcastErr(err) - } -} - // recv continuously reads from the server and forwards responses to the // appropriate channel. 
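Client.RemoveAll above is a new convenience wrapper that recursively deletes a remote directory's contents before removing the directory itself, and errors if the path does not exist. A sketch of how a caller might wrap it, assuming an already-connected *sftp.Client; the package and function names here are invented for illustration:

```go
package sftputil

import (
	"fmt"

	"github.com/pkg/sftp"
)

// CleanupRemoteDir deletes path and everything beneath it on the remote side.
// It returns an error if path does not exist, matching RemoveAll's contract.
func CleanupRemoteDir(client *sftp.Client, path string) error {
	if err := client.RemoveAll(path); err != nil {
		return fmt.Errorf("remove %s: %w", path, err)
	}
	return nil
}
```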
func (c *clientConn) recv() error { diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go index 3e264abe30..f0db14d3ac 100644 --- a/vendor/github.com/pkg/sftp/debug.go +++ b/vendor/github.com/pkg/sftp/debug.go @@ -1,3 +1,4 @@ +//go:build debug // +build debug package sftp diff --git a/vendor/github.com/pkg/sftp/fuzz.go b/vendor/github.com/pkg/sftp/fuzz.go index 169aebc284..f2f1fc31c1 100644 --- a/vendor/github.com/pkg/sftp/fuzz.go +++ b/vendor/github.com/pkg/sftp/fuzz.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package sftp diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go index eed61bfc64..3aec937f04 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // Attributes related flags. const ( @@ -12,7 +12,7 @@ const ( // Attributes defines the file attributes type defined in draft-ietf-secsh-filexfer-02 // -// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 +// Defined in: https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5 type Attributes struct { Flags uint32 @@ -116,32 +116,32 @@ func (a *Attributes) Len() int { } // MarshalInto marshals e onto the end of the given Buffer. -func (a *Attributes) MarshalInto(b *Buffer) { - b.AppendUint32(a.Flags) +func (a *Attributes) MarshalInto(buf *Buffer) { + buf.AppendUint32(a.Flags) if a.Flags&AttrSize != 0 { - b.AppendUint64(a.Size) + buf.AppendUint64(a.Size) } if a.Flags&AttrUIDGID != 0 { - b.AppendUint32(a.UID) - b.AppendUint32(a.GID) + buf.AppendUint32(a.UID) + buf.AppendUint32(a.GID) } if a.Flags&AttrPermissions != 0 { - b.AppendUint32(uint32(a.Permissions)) + buf.AppendUint32(uint32(a.Permissions)) } if a.Flags&AttrACModTime != 0 { - b.AppendUint32(a.ATime) - b.AppendUint32(a.MTime) + buf.AppendUint32(a.ATime) + buf.AppendUint32(a.MTime) } if a.Flags&AttrExtended != 0 { - b.AppendUint32(uint32(len(a.ExtendedAttributes))) + buf.AppendUint32(uint32(len(a.ExtendedAttributes))) for _, ext := range a.ExtendedAttributes { - ext.MarshalInto(b) + ext.MarshalInto(buf) } } } @@ -156,74 +156,51 @@ func (a *Attributes) MarshalBinary() ([]byte, error) { // UnmarshalFrom unmarshals an Attributes from the given Buffer into e. // // NOTE: The values of fields not covered in the a.Flags are explicitly undefined. -func (a *Attributes) UnmarshalFrom(b *Buffer) (err error) { - flags, err := b.ConsumeUint32() - if err != nil { - return err - } +func (a *Attributes) UnmarshalFrom(buf *Buffer) (err error) { + flags := buf.ConsumeUint32() - return a.XXX_UnmarshalByFlags(flags, b) + return a.XXX_UnmarshalByFlags(flags, buf) } // XXX_UnmarshalByFlags uses the pre-existing a.Flags field to determine which fields to decode. // DO NOT USE THIS: it is an anti-corruption function to implement existing internal usage in pkg/sftp. // This function is not a part of any compatibility promise. -func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, b *Buffer) (err error) { +func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, buf *Buffer) (err error) { a.Flags = flags // Short-circuit dummy attributes. 
if a.Flags == 0 { - return nil + return buf.Err } if a.Flags&AttrSize != 0 { - if a.Size, err = b.ConsumeUint64(); err != nil { - return err - } + a.Size = buf.ConsumeUint64() } if a.Flags&AttrUIDGID != 0 { - if a.UID, err = b.ConsumeUint32(); err != nil { - return err - } - - if a.GID, err = b.ConsumeUint32(); err != nil { - return err - } + a.UID = buf.ConsumeUint32() + a.GID = buf.ConsumeUint32() } if a.Flags&AttrPermissions != 0 { - m, err := b.ConsumeUint32() - if err != nil { - return err - } - - a.Permissions = FileMode(m) + a.Permissions = FileMode(buf.ConsumeUint32()) } if a.Flags&AttrACModTime != 0 { - if a.ATime, err = b.ConsumeUint32(); err != nil { - return err - } - - if a.MTime, err = b.ConsumeUint32(); err != nil { - return err - } + a.ATime = buf.ConsumeUint32() + a.MTime = buf.ConsumeUint32() } if a.Flags&AttrExtended != 0 { - count, err := b.ConsumeUint32() - if err != nil { - return err - } + count := buf.ConsumeCount() a.ExtendedAttributes = make([]ExtendedAttribute, count) for i := range a.ExtendedAttributes { - a.ExtendedAttributes[i].UnmarshalFrom(b) + a.ExtendedAttributes[i].UnmarshalFrom(buf) } } - return nil + return buf.Err } // UnmarshalBinary decodes the binary encoding of Attributes into e. @@ -233,7 +210,7 @@ func (a *Attributes) UnmarshalBinary(data []byte) error { // ExtendedAttribute defines the extended file attribute type defined in draft-ietf-secsh-filexfer-02 // -// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 +// Defined in: https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5 type ExtendedAttribute struct { Type string Data string @@ -245,9 +222,9 @@ func (e *ExtendedAttribute) Len() int { } // MarshalInto marshals e onto the end of the given Buffer. -func (e *ExtendedAttribute) MarshalInto(b *Buffer) { - b.AppendString(e.Type) - b.AppendString(e.Data) +func (e *ExtendedAttribute) MarshalInto(buf *Buffer) { + buf.AppendString(e.Type) + buf.AppendString(e.Data) } // MarshalBinary returns e as the binary encoding of e. @@ -258,16 +235,13 @@ func (e *ExtendedAttribute) MarshalBinary() ([]byte, error) { } // UnmarshalFrom unmarshals an ExtendedAattribute from the given Buffer into e. -func (e *ExtendedAttribute) UnmarshalFrom(b *Buffer) (err error) { - if e.Type, err = b.ConsumeString(); err != nil { - return err +func (e *ExtendedAttribute) UnmarshalFrom(buf *Buffer) (err error) { + *e = ExtendedAttribute{ + Type: buf.ConsumeString(), + Data: buf.ConsumeString(), } - if e.Data, err = b.ConsumeString(); err != nil { - return err - } - - return nil + return buf.Err } // UnmarshalBinary decodes the binary encoding of ExtendedAttribute into e. @@ -290,11 +264,11 @@ func (e *NameEntry) Len() int { } // MarshalInto marshals e onto the end of the given Buffer. -func (e *NameEntry) MarshalInto(b *Buffer) { - b.AppendString(e.Filename) - b.AppendString(e.Longname) +func (e *NameEntry) MarshalInto(buf *Buffer) { + buf.AppendString(e.Filename) + buf.AppendString(e.Longname) - e.Attrs.MarshalInto(b) + e.Attrs.MarshalInto(buf) } // MarshalBinary returns e as the binary encoding of e. @@ -307,16 +281,13 @@ func (e *NameEntry) MarshalBinary() ([]byte, error) { // UnmarshalFrom unmarshals an NameEntry from the given Buffer into e. // // NOTE: The values of fields not covered in the a.Flags are explicitly undefined. 
-func (e *NameEntry) UnmarshalFrom(b *Buffer) (err error) { - if e.Filename, err = b.ConsumeString(); err != nil { - return err +func (e *NameEntry) UnmarshalFrom(buf *Buffer) (err error) { + *e = NameEntry{ + Filename: buf.ConsumeString(), + Longname: buf.ConsumeString(), } - if e.Longname, err = b.ConsumeString(); err != nil { - return err - } - - return e.Attrs.UnmarshalFrom(b) + return e.Attrs.UnmarshalFrom(buf) } // UnmarshalBinary decodes the binary encoding of NameEntry into e. diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go index a6086036e7..bd4783bb88 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "encoding/binary" @@ -17,6 +17,7 @@ var ( type Buffer struct { b []byte off int + Err error } // NewBuffer creates and initializes a new buffer using buf as its initial contents. @@ -51,14 +52,17 @@ func (b *Buffer) Cap() int { return cap(b.b) } // Reset resets the buffer to be empty, but it retains the underlying storage for use by future Appends. func (b *Buffer) Reset() { - b.b = b.b[:0] - b.off = 0 + *b = Buffer{ + b: b.b[:0], + } } // StartPacket resets and initializes the buffer to be ready to start marshaling a packet into. // It truncates the buffer, reserves space for uint32(length), then appends the given packetType and requestID. func (b *Buffer) StartPacket(packetType PacketType, requestID uint32) { - b.b, b.off = append(b.b[:0], make([]byte, 4)...), 0 + *b = Buffer{ + b: append(b.b[:0], make([]byte, 4)...), + } b.AppendUint8(uint8(packetType)) b.AppendUint32(requestID) @@ -81,15 +85,21 @@ func (b *Buffer) Packet(payload []byte) (header, payloadPassThru []byte, err err } // ConsumeUint8 consumes a single byte from the buffer. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint8() (uint8, error) { +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeUint8() uint8 { + if b.Err != nil { + return 0 + } + if b.Len() < 1 { - return 0, ErrShortPacket + b.off = len(b.b) + b.Err = ErrShortPacket + return 0 } var v uint8 v, b.off = b.b[b.off], b.off+1 - return v, nil + return v } // AppendUint8 appends a single byte into the buffer. @@ -98,14 +108,9 @@ func (b *Buffer) AppendUint8(v uint8) { } // ConsumeBool consumes a single byte from the buffer, and returns true if that byte is non-zero. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeBool() (bool, error) { - v, err := b.ConsumeUint8() - if err != nil { - return false, err - } - - return v != 0, nil +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeBool() bool { + return b.ConsumeUint8() != 0 } // AppendBool appends a single bool into the buffer. @@ -119,15 +124,21 @@ func (b *Buffer) AppendBool(v bool) { } // ConsumeUint16 consumes a single uint16 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint16() (uint16, error) { +// If the buffer does not have enough data, it will set Err to ErrShortPacket. 
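The Buffer rewrite above moves sshfx from `value, err := buf.ConsumeX()` pairs to Consume methods that record the first failure in buf.Err and return zero values afterwards, which is what lets Attributes, ExtensionPair and NameEntry decode several fields and check the error once. Since the sshfx package is internal to pkg/sftp and cannot be imported, the sketch below re-implements the idea standalone rather than using the real type:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

var errShort = errors.New("packet too short")

// buffer mimics the sticky-error style of the sshfx.Buffer rewrite: Consume
// methods record the first failure in Err and become no-ops afterwards, so
// callers check the error once at the end instead of after every field.
type buffer struct {
	b   []byte
	off int
	Err error
}

func (b *buffer) ConsumeUint32() uint32 {
	if b.Err != nil {
		return 0
	}
	if len(b.b)-b.off < 4 {
		b.off = len(b.b)
		b.Err = errShort
		return 0
	}
	v := binary.BigEndian.Uint32(b.b[b.off:])
	b.off += 4
	return v
}

func main() {
	buf := &buffer{b: []byte{0, 0, 0, 7, 0, 0}}
	a := buf.ConsumeUint32() // 7
	c := buf.ConsumeUint32() // short read: sets Err, returns 0
	fmt.Println(a, c, buf.Err)
}
```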
+func (b *Buffer) ConsumeUint16() uint16 { + if b.Err != nil { + return 0 + } + if b.Len() < 2 { - return 0, ErrShortPacket + b.off = len(b.b) + b.Err = ErrShortPacket + return 0 } v := binary.BigEndian.Uint16(b.b[b.off:]) b.off += 2 - return v, nil + return v } // AppendUint16 appends single uint16 into the buffer, in network byte order (big-endian). @@ -146,15 +157,21 @@ func unmarshalUint32(b []byte) uint32 { } // ConsumeUint32 consumes a single uint32 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint32() (uint32, error) { +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeUint32() uint32 { + if b.Err != nil { + return 0 + } + if b.Len() < 4 { - return 0, ErrShortPacket + b.off = len(b.b) + b.Err = ErrShortPacket + return 0 } v := binary.BigEndian.Uint32(b.b[b.off:]) b.off += 4 - return v, nil + return v } // AppendUint32 appends a single uint32 into the buffer, in network byte order (big-endian). @@ -167,16 +184,33 @@ func (b *Buffer) AppendUint32(v uint32) { ) } +// ConsumeCount consumes a single uint32 count from the buffer, in network byte order (big-endian) as an int. +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeCount() int { + return int(b.ConsumeUint32()) +} + +// AppendCount appends a single int length as a uint32 into the buffer, in network byte order (big-endian). +func (b *Buffer) AppendCount(v int) { + b.AppendUint32(uint32(v)) +} + // ConsumeUint64 consumes a single uint64 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint64() (uint64, error) { +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeUint64() uint64 { + if b.Err != nil { + return 0 + } + if b.Len() < 8 { - return 0, ErrShortPacket + b.off = len(b.b) + b.Err = ErrShortPacket + return 0 } v := binary.BigEndian.Uint64(b.b[b.off:]) b.off += 8 - return v, nil + return v } // AppendUint64 appends a single uint64 into the buffer, in network byte order (big-endian). @@ -194,14 +228,9 @@ func (b *Buffer) AppendUint64(v uint64) { } // ConsumeInt64 consumes a single int64 from the buffer, in network byte order (big-endian) with two’s complement. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeInt64() (int64, error) { - u, err := b.ConsumeUint64() - if err != nil { - return 0, err - } - - return int64(u), err +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeInt64() int64 { + return int64(b.ConsumeUint64()) } // AppendInt64 appends a single int64 into the buffer, in network byte order (big-endian) with two’s complement. @@ -211,29 +240,52 @@ func (b *Buffer) AppendInt64(v int64) { // ConsumeByteSlice consumes a single string of raw binary data from the buffer. // A string is a uint32 length, followed by that number of raw bytes. -// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. +// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket. 
// // The returned slice aliases the buffer contents, and is valid only as long as the buffer is not reused // (that is, only until the next call to Reset, PutLength, StartPacket, or UnmarshalBinary). // // In no case will any Consume calls return overlapping slice aliases, // and Append calls are guaranteed to not disturb this slice alias. -func (b *Buffer) ConsumeByteSlice() ([]byte, error) { - length, err := b.ConsumeUint32() - if err != nil { - return nil, err +func (b *Buffer) ConsumeByteSlice() []byte { + length := int(b.ConsumeUint32()) + if b.Err != nil { + return nil } - if b.Len() < int(length) { - return nil, ErrShortPacket + if b.Len() < length || length < 0 { + b.off = len(b.b) + b.Err = ErrShortPacket + return nil } v := b.b[b.off:] - if len(v) > int(length) { + if len(v) > length || cap(v) > length { v = v[:length:length] } b.off += int(length) - return v, nil + return v +} + +// ConsumeByteSliceCopy consumes a single string of raw binary data as a copy from the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket. +// +// The returned slice does not alias any buffer contents, +// and will therefore be valid even if the buffer is later reused. +// +// If hint has sufficient capacity to hold the data, it will be reused and overwritten, +// otherwise a new backing slice will be allocated and returned. +func (b *Buffer) ConsumeByteSliceCopy(hint []byte) []byte { + data := b.ConsumeByteSlice() + + if grow := len(data) - len(hint); grow > 0 { + hint = append(hint, make([]byte, grow)...) + } + + n := copy(hint, data) + hint = hint[:n] + return hint } // AppendByteSlice appends a single string of raw binary data into the buffer. @@ -245,17 +297,12 @@ func (b *Buffer) AppendByteSlice(v []byte) { // ConsumeString consumes a single string of binary data from the buffer. // A string is a uint32 length, followed by that number of raw bytes. -// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. +// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket. // // NOTE: Go implicitly assumes that strings contain UTF-8 encoded data. // All caveats on using arbitrary binary data in Go strings applies. -func (b *Buffer) ConsumeString() (string, error) { - v, err := b.ConsumeByteSlice() - if err != nil { - return "", err - } - - return string(v), nil +func (b *Buffer) ConsumeString() string { + return string(b.ConsumeByteSlice()) } // AppendString appends a single string of binary data into the buffer. diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go index 6b7b2cef4d..f717425300 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "encoding" @@ -86,8 +86,9 @@ func (p *ExtendedPacket) MarshalPacket(reqid uint32, b []byte) (header, payload // If the extension has not been registered, then a new Buffer will be allocated. // Then the request-specific-data will be unmarshaled from the rest of the buffer. 
func (p *ExtendedPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.ExtendedRequest, err = buf.ConsumeString(); err != nil { - return err + p.ExtendedRequest = buf.ConsumeString() + if buf.Err != nil { + return buf.Err } if p.Data == nil { diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go index 11c0b99c22..c425780ca8 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // ExtensionPair defines the extension-pair type defined in draft-ietf-secsh-filexfer-13. // This type is backwards-compatible with how draft-ietf-secsh-filexfer-02 defines extensions. @@ -29,15 +29,12 @@ func (e *ExtensionPair) MarshalBinary() ([]byte, error) { // UnmarshalFrom unmarshals an ExtensionPair from the given Buffer into e. func (e *ExtensionPair) UnmarshalFrom(buf *Buffer) (err error) { - if e.Name, err = buf.ConsumeString(); err != nil { - return err + *e = ExtensionPair{ + Name: buf.ConsumeString(), + Data: buf.ConsumeString(), } - if e.Data, err = buf.ConsumeString(); err != nil { - return err - } - - return nil + return buf.Err } // UnmarshalBinary decodes the binary encoding of ExtensionPair into e. diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go index 1e5abf7466..d3009994ad 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go @@ -1,5 +1,5 @@ -// Package filexfer implements the wire encoding for secsh-filexfer as described in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 -package filexfer +// Package sshfx implements the wire encoding for secsh-filexfer as described in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt +package sshfx // PacketMarshaller narrowly defines packets that will only be transmitted. // diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go index 48f869861a..9abcbafcbf 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "fmt" @@ -10,7 +10,7 @@ type Status uint32 // Defines the various SSH_FX_* values. 
const ( // see draft-ietf-secsh-filexfer-02 - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-7 StatusOK = Status(iota) StatusEOF StatusNoSuchFile @@ -21,28 +21,28 @@ const ( StatusConnectionLost StatusOPUnsupported - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-03#section-7 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-03.txt#section-7 StatusV4InvalidHandle StatusV4NoSuchPath StatusV4FileAlreadyExists StatusV4WriteProtect - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-04#section-7 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-04.txt#section-7 StatusV4NoMedia - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-05#section-7 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-05.txt#section-7 StatusV5NoSpaceOnFilesystem StatusV5QuotaExceeded StatusV5UnknownPrincipal StatusV5LockConflict - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-06#section-8 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-06.txt#section-8 StatusV6DirNotEmpty StatusV6NotADirectory StatusV6InvalidFilename StatusV6LinkLoop - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-8 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-07.txt#section-8 StatusV6CannotDelete StatusV6InvalidParameter StatusV6FileIsADirectory @@ -50,10 +50,10 @@ const ( StatusV6ByteRangeLockRefused StatusV6DeletePending - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-8.1 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-08.txt#section-8.1 StatusV6FileCorrupt - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-10#section-9.1 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-10.txt#section-9.1 StatusV6OwnerInvalid StatusV6GroupInvalid diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go index 15caf6d28a..780800215a 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "fmt" @@ -9,7 +9,7 @@ type PacketType uint8 // Request packet types. const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 PacketTypeInit = PacketType(iota + 1) PacketTypeVersion PacketTypeOpen @@ -31,17 +31,17 @@ const ( PacketTypeReadLink PacketTypeSymlink - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-3.3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-07.txt#section-3.3 PacketTypeV6Link - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-3.3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-08.txt#section-3.3 PacketTypeV6Block PacketTypeV6Unblock ) // Response packet types. const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 PacketTypeStatus = PacketType(iota + 101) PacketTypeHandle PacketTypeData @@ -51,7 +51,7 @@ const ( // Extended packet types. 
const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 PacketTypeExtended = PacketType(iota + 200) PacketTypeExtendedReply ) @@ -122,3 +122,48 @@ func (f PacketType) String() string { return fmt.Sprintf("SSH_FXP_UNKNOWN(%d)", f) } } + +func newPacketFromType(typ PacketType) (Packet, error) { + switch typ { + case PacketTypeOpen: + return new(OpenPacket), nil + case PacketTypeClose: + return new(ClosePacket), nil + case PacketTypeRead: + return new(ReadPacket), nil + case PacketTypeWrite: + return new(WritePacket), nil + case PacketTypeLStat: + return new(LStatPacket), nil + case PacketTypeFStat: + return new(FStatPacket), nil + case PacketTypeSetstat: + return new(SetstatPacket), nil + case PacketTypeFSetstat: + return new(FSetstatPacket), nil + case PacketTypeOpenDir: + return new(OpenDirPacket), nil + case PacketTypeReadDir: + return new(ReadDirPacket), nil + case PacketTypeRemove: + return new(RemovePacket), nil + case PacketTypeMkdir: + return new(MkdirPacket), nil + case PacketTypeRmdir: + return new(RmdirPacket), nil + case PacketTypeRealPath: + return new(RealPathPacket), nil + case PacketTypeStat: + return new(StatPacket), nil + case PacketTypeRename: + return new(RenamePacket), nil + case PacketTypeReadLink: + return new(ReadLinkPacket), nil + case PacketTypeSymlink: + return new(SymlinkPacket), nil + case PacketTypeExtended: + return new(ExtendedPacket), nil + default: + return nil, fmt.Errorf("unexpected request packet type: %v", typ) + } +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go index a142771285..44594acffd 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // ClosePacket defines the SSH_FXP_CLOSE packet. type ClosePacket struct { @@ -27,18 +27,18 @@ func (p *ClosePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *ClosePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = ClosePacket{ + Handle: buf.ConsumeString(), } - return nil + return buf.Err } // ReadPacket defines the SSH_FXP_READ packet. type ReadPacket struct { Handle string Offset uint64 - Len uint32 + Length uint32 } // Type returns the SSH_FXP_xy value associated with this packet type. @@ -58,7 +58,7 @@ func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by buf.StartPacket(PacketTypeRead, reqid) buf.AppendString(p.Handle) buf.AppendUint64(p.Offset) - buf.AppendUint32(p.Len) + buf.AppendUint32(p.Length) return buf.Packet(payload) } @@ -66,19 +66,13 @@ func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. 
func (p *ReadPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = ReadPacket{ + Handle: buf.ConsumeString(), + Offset: buf.ConsumeUint64(), + Length: buf.ConsumeUint32(), } - if p.Offset, err = buf.ConsumeUint64(); err != nil { - return err - } - - if p.Len, err = buf.ConsumeUint32(); err != nil { - return err - } - - return nil + return buf.Err } // WritePacket defines the SSH_FXP_WRITE packet. @@ -121,26 +115,13 @@ func (p *WritePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // // This means this _does not_ alias any of the data buffer that is passed in. func (p *WritePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = WritePacket{ + Handle: buf.ConsumeString(), + Offset: buf.ConsumeUint64(), + Data: buf.ConsumeByteSliceCopy(p.Data), } - if p.Offset, err = buf.ConsumeUint64(); err != nil { - return err - } - - data, err := buf.ConsumeByteSlice() - if err != nil { - return err - } - - if len(p.Data) < len(data) { - p.Data = make([]byte, len(data)) - } - - n := copy(p.Data, data) - p.Data = p.Data[:n] - return nil + return buf.Err } // FStatPacket defines the SSH_FXP_FSTAT packet. @@ -170,11 +151,11 @@ func (p *FStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *FStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = FStatPacket{ + Handle: buf.ConsumeString(), } - return nil + return buf.Err } // FSetstatPacket defines the SSH_FXP_FSETSTAT packet. @@ -207,8 +188,8 @@ func (p *FSetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *FSetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = FSetstatPacket{ + Handle: buf.ConsumeString(), } return p.Attrs.UnmarshalFrom(buf) @@ -241,9 +222,9 @@ func (p *ReadDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [ // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *ReadDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = ReadDirPacket{ + Handle: buf.ConsumeString(), } - return nil + return buf.Err } diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go index b0bc6f5053..c553ee2e2c 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // InitPacket defines the SSH_FXP_INIT packet. 
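Because WritePacket above now passes p.Data as the hint to ConsumeByteSliceCopy, reusing one packet value across requests lets large write bodies reuse a single backing array. A rough sketch of that pattern, in the same package; the bodies slice and handle callback are invented for illustration:

// consumeWrites decodes a sequence of SSH_FXP_WRITE bodies into one reused
// WritePacket. The Data slice passed to handle is only valid until the next
// iteration, because it is handed back to ConsumeByteSliceCopy as the hint.
func consumeWrites(bodies [][]byte, handle func(handle string, offset uint64, data []byte) error) error {
	var pkt WritePacket
	for _, body := range bodies {
		if err := pkt.UnmarshalPacketBody(NewBuffer(body)); err != nil {
			return err
		}
		if err := handle(pkt.Handle, pkt.Offset, pkt.Data); err != nil {
			return err
		}
	}
	return nil
}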
type InitPacket struct { @@ -33,8 +33,8 @@ func (p *InitPacket) MarshalBinary() ([]byte, error) { func (p *InitPacket) UnmarshalBinary(data []byte) (err error) { buf := NewBuffer(data) - if p.Version, err = buf.ConsumeUint32(); err != nil { - return err + *p = InitPacket{ + Version: buf.ConsumeUint32(), } for buf.Len() > 0 { @@ -46,7 +46,7 @@ func (p *InitPacket) UnmarshalBinary(data []byte) (err error) { p.Extensions = append(p.Extensions, &ext) } - return nil + return buf.Err } // VersionPacket defines the SSH_FXP_VERSION packet. @@ -82,8 +82,8 @@ func (p *VersionPacket) MarshalBinary() ([]byte, error) { func (p *VersionPacket) UnmarshalBinary(data []byte) (err error) { buf := NewBuffer(data) - if p.Version, err = buf.ConsumeUint32(); err != nil { - return err + *p = VersionPacket{ + Version: buf.ConsumeUint32(), } for buf.Len() > 0 { diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go index 1358711421..896ba16e50 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // SSH_FXF_* flags. const ( @@ -43,12 +43,9 @@ func (p *OpenPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *OpenPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Filename, err = buf.ConsumeString(); err != nil { - return err - } - - if p.PFlags, err = buf.ConsumeUint32(); err != nil { - return err + *p = OpenPacket{ + Filename: buf.ConsumeString(), + PFlags: buf.ConsumeUint32(), } return p.Attrs.UnmarshalFrom(buf) @@ -81,9 +78,9 @@ func (p *OpenDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [ // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *OpenDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = OpenDirPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go index 3f24e9c224..fdf65d0577 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go @@ -1,59 +1,13 @@ -package filexfer +package sshfx import ( "errors" - "fmt" "io" ) // smallBufferSize is an initial allocation minimal capacity. 
const smallBufferSize = 64 -func newPacketFromType(typ PacketType) (Packet, error) { - switch typ { - case PacketTypeOpen: - return new(OpenPacket), nil - case PacketTypeClose: - return new(ClosePacket), nil - case PacketTypeRead: - return new(ReadPacket), nil - case PacketTypeWrite: - return new(WritePacket), nil - case PacketTypeLStat: - return new(LStatPacket), nil - case PacketTypeFStat: - return new(FStatPacket), nil - case PacketTypeSetstat: - return new(SetstatPacket), nil - case PacketTypeFSetstat: - return new(FSetstatPacket), nil - case PacketTypeOpenDir: - return new(OpenDirPacket), nil - case PacketTypeReadDir: - return new(ReadDirPacket), nil - case PacketTypeRemove: - return new(RemovePacket), nil - case PacketTypeMkdir: - return new(MkdirPacket), nil - case PacketTypeRmdir: - return new(RmdirPacket), nil - case PacketTypeRealPath: - return new(RealPathPacket), nil - case PacketTypeStat: - return new(StatPacket), nil - case PacketTypeRename: - return new(RenamePacket), nil - case PacketTypeReadLink: - return new(ReadLinkPacket), nil - case PacketTypeSymlink: - return new(SymlinkPacket), nil - case PacketTypeExtended: - return new(ExtendedPacket), nil - default: - return nil, fmt.Errorf("unexpected request packet type: %v", typ) - } -} - // RawPacket implements the general packet format from draft-ietf-secsh-filexfer-02 // // RawPacket is intended for use in clients receiving responses, @@ -63,7 +17,7 @@ func newPacketFromType(typ PacketType) (Packet, error) { // For servers expecting to receive arbitrary request packet types, // use RequestPacket. // -// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 +// Defined in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 type RawPacket struct { PacketType PacketType RequestID uint32 @@ -110,19 +64,14 @@ func (p *RawPacket) MarshalBinary() ([]byte, error) { // The Data field will alias the passed in Buffer, // so the buffer passed in should not be reused before RawPacket.Reset(). func (p *RawPacket) UnmarshalFrom(buf *Buffer) error { - typ, err := buf.ConsumeUint8() - if err != nil { - return err - } - - p.PacketType = PacketType(typ) - - if p.RequestID, err = buf.ConsumeUint32(); err != nil { - return err + *p = RawPacket{ + PacketType: PacketType(buf.ConsumeUint8()), + RequestID: buf.ConsumeUint32(), } p.Data = *buf - return nil + + return buf.Err } // UnmarshalBinary decodes a full raw packet out of the given data. @@ -225,7 +174,7 @@ func (p *RawPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) erro // where automatic unmarshaling of the packet body does not make sense, // use RawPacket. // -// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 +// Defined in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 type RequestPacket struct { RequestID uint32 @@ -268,18 +217,19 @@ func (p *RequestPacket) MarshalBinary() ([]byte, error) { // The Request field may alias the passed in Buffer, (e.g. SSH_FXP_WRITE), // so the buffer passed in should not be reused before RequestPacket.Reset(). 
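As a client-side usage sketch for RawPacket (not something the patch adds), a response could be pulled off the wire like this; the scratch-buffer size and maximum packet length are arbitrary illustrative values:

// readResponse reads one complete packet into a RawPacket, leaving the body
// uninterpreted until the caller matches it against an outstanding request.
func readResponse(r io.Reader) (*RawPacket, error) {
	p := new(RawPacket)
	scratch := make([]byte, 64*1024) // reusable read buffer, size chosen arbitrarily
	if err := p.ReadFrom(r, scratch, 256*1024); err != nil {
		return nil, err
	}
	return p, nil
}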
func (p *RequestPacket) UnmarshalFrom(buf *Buffer) error { - typ, err := buf.ConsumeUint8() + typ := PacketType(buf.ConsumeUint8()) + if buf.Err != nil { + return buf.Err + } + + req, err := newPacketFromType(typ) if err != nil { return err } - p.Request, err = newPacketFromType(PacketType(typ)) - if err != nil { - return err - } - - if p.RequestID, err = buf.ConsumeUint32(); err != nil { - return err + *p = RequestPacket{ + RequestID: buf.ConsumeUint32(), + Request: req, } return p.Request.UnmarshalPacketBody(buf) diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go index e6f692d9ff..0180326f63 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // LStatPacket defines the SSH_FXP_LSTAT packet. type LStatPacket struct { @@ -27,11 +27,11 @@ func (p *LStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *LStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = LStatPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // SetstatPacket defines the SSH_FXP_SETSTAT packet. @@ -64,8 +64,8 @@ func (p *SetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [ // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *SetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = SetstatPacket{ + Path: buf.ConsumeString(), } return p.Attrs.UnmarshalFrom(buf) @@ -98,11 +98,11 @@ func (p *RemovePacket) MarshalPacket(reqid uint32, b []byte) (header, payload [] // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *RemovePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = RemovePacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // MkdirPacket defines the SSH_FXP_MKDIR packet. @@ -135,8 +135,8 @@ func (p *MkdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *MkdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = MkdirPacket{ + Path: buf.ConsumeString(), } return p.Attrs.UnmarshalFrom(buf) @@ -169,11 +169,11 @@ func (p *RmdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *RmdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = RmdirPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // RealPathPacket defines the SSH_FXP_REALPATH packet. 
@@ -203,11 +203,11 @@ func (p *RealPathPacket) MarshalPacket(reqid uint32, b []byte) (header, payload // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *RealPathPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = RealPathPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // StatPacket defines the SSH_FXP_STAT packet. @@ -237,11 +237,11 @@ func (p *StatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *StatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = StatPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // RenamePacket defines the SSH_FXP_RENAME packet. @@ -274,15 +274,12 @@ func (p *RenamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload [] // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *RenamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.OldPath, err = buf.ConsumeString(); err != nil { - return err + *p = RenamePacket{ + OldPath: buf.ConsumeString(), + NewPath: buf.ConsumeString(), } - if p.NewPath, err = buf.ConsumeString(); err != nil { - return err - } - - return nil + return buf.Err } // ReadLinkPacket defines the SSH_FXP_READLINK packet. @@ -312,18 +309,18 @@ func (p *ReadLinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *ReadLinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = ReadLinkPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // SymlinkPacket defines the SSH_FXP_SYMLINK packet. // // The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. // Unfortunately, the reversal was not noticed until the server was widely deployed. -// Covered in Section 3.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL +// Covered in Section 4.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL type SymlinkPacket struct { LinkPath string TargetPath string @@ -355,14 +352,11 @@ func (p *SymlinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [ // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *SymlinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - // Arguments were inadvertently reversed. - if p.TargetPath, err = buf.ConsumeString(); err != nil { - return err + *p = SymlinkPacket{ + // Arguments were inadvertently reversed. 
+ TargetPath: buf.ConsumeString(), + LinkPath: buf.ConsumeString(), } - if p.LinkPath, err = buf.ConsumeString(); err != nil { - return err - } - - return nil + return buf.Err } diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go index 2fe63d5916..0143ec0c99 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // FileMode represents a file’s mode and permission bits. // The bits are defined according to POSIX standards, diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go index 7a9b3eae85..311708ffec 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "fmt" @@ -6,7 +6,7 @@ import ( // StatusPacket defines the SSH_FXP_STATUS packet. // -// Specified in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7 +// Specified in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-7 type StatusPacket struct { StatusCode Status ErrorMessage string @@ -19,7 +19,7 @@ func (p *StatusPacket) Error() string { return "sftp: " + p.StatusCode.String() } - return fmt.Sprintf("sftp: %q (%s)", p.ErrorMessage, p.StatusCode) + return fmt.Sprintf("sftp: %s: %q", p.StatusCode, p.ErrorMessage) } // Is returns true if target is a StatusPacket with the same StatusCode, @@ -57,21 +57,13 @@ func (p *StatusPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [] // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *StatusPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - statusCode, err := buf.ConsumeUint32() - if err != nil { - return err - } - p.StatusCode = Status(statusCode) - - if p.ErrorMessage, err = buf.ConsumeString(); err != nil { - return err + *p = StatusPacket{ + StatusCode: Status(buf.ConsumeUint32()), + ErrorMessage: buf.ConsumeString(), + LanguageTag: buf.ConsumeString(), } - if p.LanguageTag, err = buf.ConsumeString(); err != nil { - return err - } - - return nil + return buf.Err } // HandlePacket defines the SSH_FXP_HANDLE packet. @@ -101,11 +93,11 @@ func (p *HandlePacket) MarshalPacket(reqid uint32, b []byte) (header, payload [] // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *HandlePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = HandlePacket{ + Handle: buf.ConsumeString(), } - return nil + return buf.Err } // DataPacket defines the SSH_FXP_DATA packet. @@ -143,18 +135,11 @@ func (p *DataPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // // This means this _does not_ alias any of the data buffer that is passed in. 
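To make the reordered StatusPacket.Error format above concrete, a small sketch; the status code and message are invented, and the exact code text comes from Status.String:

func exampleStatusError() string {
	p := &StatusPacket{
		StatusCode:   StatusNoSuchFile,
		ErrorMessage: "no such file or directory",
	}
	// Yields roughly: sftp: SSH_FX_NO_SUCH_FILE: "no such file or directory"
	return p.Error()
}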
func (p *DataPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - data, err := buf.ConsumeByteSlice() - if err != nil { - return err + *p = DataPacket{ + Data: buf.ConsumeByteSliceCopy(p.Data), } - if len(p.Data) < len(data) { - p.Data = make([]byte, len(data)) - } - - n := copy(p.Data, data) - p.Data = p.Data[:n] - return nil + return buf.Err } // NamePacket defines the SSH_FXP_NAME packet. @@ -193,14 +178,16 @@ func (p *NamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *NamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - count, err := buf.ConsumeUint32() - if err != nil { - return err + count := buf.ConsumeCount() + if buf.Err != nil { + return buf.Err } - p.Entries = make([]*NameEntry, 0, count) + *p = NamePacket{ + Entries: make([]*NameEntry, 0, count), + } - for i := uint32(0); i < count; i++ { + for i := 0; i < count; i++ { var e NameEntry if err := e.UnmarshalFrom(buf); err != nil { return err @@ -209,7 +196,7 @@ func (p *NamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { p.Entries = append(p.Entries, &e) } - return nil + return buf.Err } // AttrsPacket defines the SSH_FXP_ATTRS packet. diff --git a/vendor/github.com/pkg/sftp/ls_formatting.go b/vendor/github.com/pkg/sftp/ls_formatting.go index e083e22a49..19271ad7a6 100644 --- a/vendor/github.com/pkg/sftp/ls_formatting.go +++ b/vendor/github.com/pkg/sftp/ls_formatting.go @@ -60,6 +60,13 @@ func runLs(idLookup NameLookupFileLister, dirent os.FileInfo) string { uid = lsFormatID(sys.UID) gid = lsFormatID(sys.GID) default: + if fiExt, ok := dirent.(FileInfoUidGid); ok { + uid = lsFormatID(fiExt.Uid()) + gid = lsFormatID(fiExt.Gid()) + + break + } + numLinks, uid, gid = lsLinksUIDGID(dirent) } diff --git a/vendor/github.com/pkg/sftp/ls_plan9.go b/vendor/github.com/pkg/sftp/ls_plan9.go index a16a3ea06e..b70b29420f 100644 --- a/vendor/github.com/pkg/sftp/ls_plan9.go +++ b/vendor/github.com/pkg/sftp/ls_plan9.go @@ -1,3 +1,4 @@ +//go:build plan9 // +build plan9 package sftp diff --git a/vendor/github.com/pkg/sftp/ls_stub.go b/vendor/github.com/pkg/sftp/ls_stub.go index 6dec393785..f58abf7823 100644 --- a/vendor/github.com/pkg/sftp/ls_stub.go +++ b/vendor/github.com/pkg/sftp/ls_stub.go @@ -1,3 +1,4 @@ +//go:build windows || android // +build windows android package sftp diff --git a/vendor/github.com/pkg/sftp/ls_unix.go b/vendor/github.com/pkg/sftp/ls_unix.go index 59ccffde5e..0beba32b2c 100644 --- a/vendor/github.com/pkg/sftp/ls_unix.go +++ b/vendor/github.com/pkg/sftp/ls_unix.go @@ -1,3 +1,4 @@ +//go:build aix || darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || js // +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris js package sftp diff --git a/vendor/github.com/pkg/sftp/packet-manager.go b/vendor/github.com/pkg/sftp/packet-manager.go index c740c4c8c9..647836ba82 100644 --- a/vendor/github.com/pkg/sftp/packet-manager.go +++ b/vendor/github.com/pkg/sftp/packet-manager.go @@ -40,7 +40,7 @@ func newPktMgr(sender packetSender) *packetManager { return s } -//// packet ordering +// // packet ordering func (s *packetManager) newOrderID() uint32 { s.packetCount++ return s.packetCount @@ -89,7 +89,7 @@ func (o orderedPackets) Sort() { }) } -//// packet registry +// // packet registry // register incoming packets to be handled func (s *packetManager) incomingPacket(pkt orderedRequest) { 
s.working.Add(1) diff --git a/vendor/github.com/pkg/sftp/packet-typing.go b/vendor/github.com/pkg/sftp/packet-typing.go index f4f9052950..fec88e7b51 100644 --- a/vendor/github.com/pkg/sftp/packet-typing.go +++ b/vendor/github.com/pkg/sftp/packet-typing.go @@ -31,7 +31,7 @@ type notReadOnly interface { notReadOnly() } -//// define types by adding methods +// // define types by adding methods // hasPath func (p *sshFxpLstatPacket) getPath() string { return p.Path } func (p *sshFxpStatPacket) getPath() string { return p.Path } diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go index 4059cf8e0a..1232ff1efe 100644 --- a/vendor/github.com/pkg/sftp/packet.go +++ b/vendor/github.com/pkg/sftp/packet.go @@ -71,6 +71,15 @@ func marshalFileInfo(b []byte, fi os.FileInfo) []byte { b = marshalUint32(b, fileStat.Mtime) } + if flags&sshFileXferAttrExtended != 0 { + b = marshalUint32(b, uint32(len(fileStat.Extended))) + + for _, attr := range fileStat.Extended { + b = marshalString(b, attr.ExtType) + b = marshalString(b, attr.ExtData) + } + } + return b } @@ -281,6 +290,11 @@ func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (uint8, []byte, e b = make([]byte, length) } if _, err := io.ReadFull(r, b[:length]); err != nil { + // ReadFull only returns EOF if it has read no bytes. + // In this case, that means a partial packet, and thus unexpected. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } debug("recv packet %d bytes: err %v", length, err) return 0, nil, err } @@ -522,7 +536,12 @@ func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { } type sshFxpSymlinkPacket struct { - ID uint32 + ID uint32 + + // The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. + // Unfortunately, the reversal was not noticed until the server was widely deployed. + // Covered in Section 4.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL + Targetpath string Linkpath string } @@ -1242,7 +1261,7 @@ func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { } func (p *sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket { - err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + err := os.Rename(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath)) return statusFromError(p.ID, err) } @@ -1271,6 +1290,6 @@ func (p *sshFxpExtendedPacketHardlink) UnmarshalBinary(b []byte) error { } func (p *sshFxpExtendedPacketHardlink) respond(s *Server) responsePacket { - err := os.Link(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + err := os.Link(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath)) return statusFromError(p.ID, err) } diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go index b695528fde..9ecedc4410 100644 --- a/vendor/github.com/pkg/sftp/release.go +++ b/vendor/github.com/pkg/sftp/release.go @@ -1,3 +1,4 @@ +//go:build !debug // +build !debug package sftp diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go index ba22bcd0f8..519b3b7680 100644 --- a/vendor/github.com/pkg/sftp/request-example.go +++ b/vendor/github.com/pkg/sftp/request-example.go @@ -391,21 +391,6 @@ func (fs *root) Filelist(r *Request) (ListerAt, error) { return nil, err } return listerat{file}, nil - - case "Readlink": - symlink, err := fs.readlink(r.Filepath) - if err != nil { - return nil, err - } - - // SFTP-v2: The server will respond with a SSH_FXP_NAME packet containing only - // one name and a dummy attributes value. 
- return listerat{ - &memFile{ - name: symlink, - err: os.ErrNotExist, // prevent accidental use as a reader/writer. - }, - }, nil } return nil, errors.New("unsupported") @@ -434,7 +419,7 @@ func (fs *root) readdir(pathname string) ([]os.FileInfo, error) { return files, nil } -func (fs *root) readlink(pathname string) (string, error) { +func (fs *root) Readlink(pathname string) (string, error) { file, err := fs.lfetch(pathname) if err != nil { return "", err @@ -464,19 +449,10 @@ func (fs *root) Lstat(r *Request) (ListerAt, error) { return listerat{file}, nil } -// implements RealpathFileLister interface -func (fs *root) Realpath(p string) string { - if fs.startDirectory == "" || fs.startDirectory == "/" { - return cleanPath(p) - } - return cleanPathWithBase(fs.startDirectory, p) -} - // In memory file-system-y thing that the Hanlders live on type root struct { - rootFile *memFile - mockErr error - startDirectory string + rootFile *memFile + mockErr error mu sync.Mutex files map[string]*memFile @@ -534,8 +510,8 @@ func (fs *root) exists(path string) bool { return err != os.ErrNotExist } -func (fs *root) fetch(path string) (*memFile, error) { - file, err := fs.lfetch(path) +func (fs *root) fetch(pathname string) (*memFile, error) { + file, err := fs.lfetch(pathname) if err != nil { return nil, err } @@ -546,7 +522,12 @@ func (fs *root) fetch(path string) (*memFile, error) { return nil, errTooManySymlinks } - file, err = fs.lfetch(file.symlink) + linkTarget := file.symlink + if !path.IsAbs(linkTarget) { + linkTarget = path.Join(path.Dir(file.name), linkTarget) + } + + file, err = fs.lfetch(linkTarget) if err != nil { return nil, err } diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go index e5dc49bb19..2090e31627 100644 --- a/vendor/github.com/pkg/sftp/request-interfaces.go +++ b/vendor/github.com/pkg/sftp/request-interfaces.go @@ -74,6 +74,11 @@ type StatVFSFileCmder interface { // FileLister should return an object that fulfils the ListerAt interface // Note in cases of an error, the error text will be sent to the client. // Called for Methods: List, Stat, Readlink +// +// Since Filelist returns an os.FileInfo, this can make it non-ideal for implementing Readlink. +// This is because the Name receiver method defined by that interface defines that it should only return the base name. +// However, Readlink is required to be capable of returning essentially any arbitrary valid path relative or absolute. +// In order to implement this more expressive requirement, implement [ReadlinkFileLister] which will then be used instead. type FileLister interface { Filelist(*Request) (ListerAt, error) } @@ -87,12 +92,33 @@ type LstatFileLister interface { } // RealPathFileLister is a FileLister that implements the Realpath method. -// We use "/" as start directory for relative paths, implementing this -// interface you can customize the start directory. +// The built-in RealPath implementation does not resolve symbolic links. +// By implementing this interface you can customize the returned path +// and, for example, resolve symbolinc links if needed for your use case. // You have to return an absolute POSIX path. // -// Deprecated: if you want to set a start directory use WithStartDirectory RequestServerOption instead. +// Up to v1.13.5 the signature for the RealPath method was: +// +// # RealPath(string) string +// +// we have added a legacyRealPathFileLister that implements the old method +// to ensure that your code does not break. 
+// You should use the new method signature to avoid future issues type RealPathFileLister interface { + FileLister + RealPath(string) (string, error) +} + +// ReadlinkFileLister is a FileLister that implements the Readlink method. +// By implementing the Readlink method, it is possible to return any arbitrary valid path relative or absolute. +// This allows giving a better response than via the default FileLister (which is limited to os.FileInfo, whose Name method should only return the base name of a file) +type ReadlinkFileLister interface { + FileLister + Readlink(string) (string, error) +} + +// This interface is here for backward compatibility only +type legacyRealPathFileLister interface { FileLister RealPath(string) string } @@ -105,11 +131,19 @@ type NameLookupFileLister interface { LookupGroupName(string) string } -// ListerAt does for file lists what io.ReaderAt does for files. -// ListAt should return the number of entries copied and an io.EOF -// error if at end of list. This is testable by comparing how many you -// copied to how many could be copied (eg. n < len(ls) below). +// ListerAt does for file lists what io.ReaderAt does for files, i.e. a []os.FileInfo buffer is passed to the ListAt function +// and the entries that are populated in the buffer will be passed to the client. +// +// ListAt should return the number of entries copied and an io.EOF error if at end of list. +// This is testable by comparing how many you copied to how many could be copied (eg. n < len(ls) below). // The copy() builtin is best for the copying. +// +// Uid and gid information will on unix systems be retrieved from [os.FileInfo.Sys] +// if this function returns a [syscall.Stat_t] when called on a populated entry. +// Alternatively, if the entry implements [FileInfoUidGid], it will be used for uid and gid information. +// +// If a populated entry implements [FileInfoExtendedData], extended attributes will also be returned to the client. +// // Note in cases of an error, the error text will be sent to the client. type ListerAt interface { ListAt([]os.FileInfo, int64) (int, error) diff --git a/vendor/github.com/pkg/sftp/request-plan9.go b/vendor/github.com/pkg/sftp/request-plan9.go index 2444da593d..38f91bcde2 100644 --- a/vendor/github.com/pkg/sftp/request-plan9.go +++ b/vendor/github.com/pkg/sftp/request-plan9.go @@ -1,10 +1,9 @@ +//go:build plan9 // +build plan9 package sftp import ( - "path" - "path/filepath" "syscall" ) @@ -15,20 +14,3 @@ func fakeFileInfoSys() interface{} { func testOsSys(sys interface{}) error { return nil } - -func toLocalPath(p string) string { - lp := filepath.FromSlash(p) - - if path.IsAbs(p) { - tmp := lp[1:] - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes is absolute, - // then we have a filepath encoded with a prefix '/'. - // e.g. "/#s/boot" to "#s/boot" - return tmp - } - } - - return lp -} diff --git a/vendor/github.com/pkg/sftp/request-readme.md b/vendor/github.com/pkg/sftp/request-readme.md index f887274dcc..f8b81f3aa3 100644 --- a/vendor/github.com/pkg/sftp/request-readme.md +++ b/vendor/github.com/pkg/sftp/request-readme.md @@ -28,7 +28,7 @@ then sends to the client. Handler for "Put" method and returns an io.Writer for the file which the server then writes the uploaded file to. The file opening "pflags" are currently preserved in the Request.Flags field as a 32bit bitmask value. 
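Circling back to the ReadlinkFileLister and the error-returning RealPathFileLister shown earlier in this file, a minimal handler sketch might look like the following; the exampleHandlers type, its root field, and the path handling are all invented for illustration:

package main

import (
	"os"
	"path"
	"path/filepath"

	"github.com/pkg/sftp"
)

// exampleHandlers is a hypothetical FileList handler that opts in to the new hooks.
type exampleHandlers struct{ root string }

func (h *exampleHandlers) Filelist(r *sftp.Request) (sftp.ListerAt, error) {
	return nil, os.ErrNotExist // real List/Stat handling elided from the sketch
}

// Readlink may return any valid relative or absolute target, not just a base name.
func (h *exampleHandlers) Readlink(p string) (string, error) {
	// A real handler should validate p against root; skipped here.
	return os.Readlink(filepath.Join(h.root, filepath.FromSlash(p)))
}

// RealPath uses the new (string, error) signature.
func (h *exampleHandlers) RealPath(p string) (string, error) {
	return path.Join("/", p), nil
}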
See the [SFTP -spec](https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.3) for +spec](https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-6.3) for details. ### Filecmd(*Request) error diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go index b7dadd6c1b..7a99db648c 100644 --- a/vendor/github.com/pkg/sftp/request-server.go +++ b/vendor/github.com/pkg/sftp/request-server.go @@ -219,12 +219,21 @@ func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedR rpkt = statusFromError(pkt.ID, rs.closeRequest(handle)) case *sshFxpRealpathPacket: var realPath string - if realPather, ok := rs.Handlers.FileList.(RealPathFileLister); ok { - realPath = realPather.RealPath(pkt.getPath()) - } else { + var err error + + switch pather := rs.Handlers.FileList.(type) { + case RealPathFileLister: + realPath, err = pather.RealPath(pkt.getPath()) + case legacyRealPathFileLister: + realPath = pather.RealPath(pkt.getPath()) + default: realPath = cleanPathWithBase(rs.startDirectory, pkt.getPath()) } - rpkt = cleanPacketPath(pkt, realPath) + if err != nil { + rpkt = statusFromError(pkt.ID, err) + } else { + rpkt = cleanPacketPath(pkt, realPath) + } case *sshFxpOpendirPacket: request := requestFromPacket(ctx, pkt, rs.startDirectory) handle := rs.nextRequest(request) diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go index 50b08a38d3..e3e037d606 100644 --- a/vendor/github.com/pkg/sftp/request-unix.go +++ b/vendor/github.com/pkg/sftp/request-unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !plan9 // +build !windows,!plan9 package sftp @@ -21,7 +22,3 @@ func testOsSys(sys interface{}) error { } return nil } - -func toLocalPath(p string) string { - return p -} diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go index 116c27aab9..57d788df64 100644 --- a/vendor/github.com/pkg/sftp/request.go +++ b/vendor/github.com/pkg/sftp/request.go @@ -187,6 +187,7 @@ func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Reques // NOTE: given a POSIX compliant signature: symlink(target, linkpath string) // this makes Request.Target the linkpath, and Request.Filepath the target. 
request.Target = cleanPathWithBase(baseDir, p.Linkpath) + request.Filepath = p.Targetpath case *sshFxpExtendedPacketHardlink: request.Target = cleanPathWithBase(baseDir, p.Newpath) } @@ -294,7 +295,12 @@ func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, o return filecmd(handlers.FileCmd, r, pkt) case "List": return filelist(handlers.FileList, r, pkt) - case "Stat", "Lstat", "Readlink": + case "Stat", "Lstat": + return filestat(handlers.FileList, r, pkt) + case "Readlink": + if readlinkFileLister, ok := handlers.FileList.(ReadlinkFileLister); ok { + return readlink(readlinkFileLister, r, pkt) + } return filestat(handlers.FileList, r, pkt) default: return statusFromError(pkt.id(), fmt.Errorf("unexpected method: %s", r.Method)) @@ -598,6 +604,23 @@ func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket { } } +func readlink(readlinkFileLister ReadlinkFileLister, r *Request, pkt requestPacket) responsePacket { + resolved, err := readlinkFileLister.Readlink(r.Filepath) + if err != nil { + return statusFromError(pkt.id(), err) + } + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: []*sshFxpNameAttr{ + { + Name: resolved, + LongName: resolved, + Attrs: emptyFileStat, + }, + }, + } +} + // init attributes of request object from packet data func requestMethod(p requestPacket) (method string) { switch p.(type) { diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go index 1f6d3df170..bd1d686457 100644 --- a/vendor/github.com/pkg/sftp/request_windows.go +++ b/vendor/github.com/pkg/sftp/request_windows.go @@ -1,8 +1,6 @@ package sftp import ( - "path" - "path/filepath" "syscall" ) @@ -13,32 +11,3 @@ func fakeFileInfoSys() interface{} { func testOsSys(sys interface{}) error { return nil } - -func toLocalPath(p string) string { - lp := filepath.FromSlash(p) - - if path.IsAbs(p) { - tmp := lp - for len(tmp) > 0 && tmp[0] == '\\' { - tmp = tmp[1:] - } - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes is absolute, - // then we have a filepath encoded with a prefix '/'. - // e.g. "/C:/Windows" to "C:\\Windows" - return tmp - } - - tmp += "\\" - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes but with extra end slash is absolute, - // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end. - // e.g. "/C:" to "C:\\" - return tmp - } - } - - return lp -} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go index 529052b444..2e419f5909 100644 --- a/vendor/github.com/pkg/sftp/server.go +++ b/vendor/github.com/pkg/sftp/server.go @@ -24,7 +24,7 @@ const ( // Server is an SSH File Transfer Protocol (sftp) server. // This is intended to provide the sftp subsystem to an ssh server daemon. // This implementation currently supports most of sftp server protocol version 3, -// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +// as specified at https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt type Server struct { *serverConn debugStream io.Writer @@ -33,6 +33,7 @@ type Server struct { openFiles map[string]*os.File openFilesLock sync.RWMutex handleCount int + workDir string } func (svr *Server) nextHandle(f *os.File) string { @@ -128,6 +129,16 @@ func WithAllocator() ServerOption { } } +// WithServerWorkingDirectory sets a working directory to use as base +// for relative paths. +// If unset the default is current working directory (os.Getwd). 
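A usage sketch for the option documented just above; the channel argument stands in for whatever io.ReadWriteCloser carries the SFTP subsystem, and the /srv/data path is a placeholder:

// startSFTP serves the built-in handlers with relative paths resolved against
// a fixed working directory instead of the process working directory.
func startSFTP(channel io.ReadWriteCloser) error {
	srv, err := NewServer(channel, WithServerWorkingDirectory("/srv/data"))
	if err != nil {
		return err
	}
	return srv.Serve()
}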
+func WithServerWorkingDirectory(workDir string) ServerOption { + return func(s *Server) error { + s.workDir = cleanPath(workDir) + return nil + } +} + type rxPacket struct { pktType fxp pktBytes []byte @@ -174,7 +185,7 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpStatPacket: // stat the requested file - info, err := os.Stat(toLocalPath(p.Path)) + info, err := os.Stat(s.toLocalPath(p.Path)) rpkt = &sshFxpStatResponse{ ID: p.ID, info: info, @@ -184,7 +195,7 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpLstatPacket: // stat the requested file - info, err := os.Lstat(toLocalPath(p.Path)) + info, err := os.Lstat(s.toLocalPath(p.Path)) rpkt = &sshFxpStatResponse{ ID: p.ID, info: info, @@ -208,24 +219,24 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpMkdirPacket: // TODO FIXME: ignore flags field - err := os.Mkdir(toLocalPath(p.Path), 0755) + err := os.Mkdir(s.toLocalPath(p.Path), 0o755) rpkt = statusFromError(p.ID, err) case *sshFxpRmdirPacket: - err := os.Remove(toLocalPath(p.Path)) + err := os.Remove(s.toLocalPath(p.Path)) rpkt = statusFromError(p.ID, err) case *sshFxpRemovePacket: - err := os.Remove(toLocalPath(p.Filename)) + err := os.Remove(s.toLocalPath(p.Filename)) rpkt = statusFromError(p.ID, err) case *sshFxpRenamePacket: - err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + err := os.Rename(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath)) rpkt = statusFromError(p.ID, err) case *sshFxpSymlinkPacket: - err := os.Symlink(toLocalPath(p.Targetpath), toLocalPath(p.Linkpath)) + err := os.Symlink(s.toLocalPath(p.Targetpath), s.toLocalPath(p.Linkpath)) rpkt = statusFromError(p.ID, err) case *sshFxpClosePacket: rpkt = statusFromError(p.ID, s.closeHandle(p.Handle)) case *sshFxpReadlinkPacket: - f, err := os.Readlink(toLocalPath(p.Path)) + f, err := os.Readlink(s.toLocalPath(p.Path)) rpkt = &sshFxpNamePacket{ ID: p.ID, NameAttrs: []*sshFxpNameAttr{ @@ -240,7 +251,7 @@ func handlePacket(s *Server, p orderedRequest) error { rpkt = statusFromError(p.ID, err) } case *sshFxpRealpathPacket: - f, err := filepath.Abs(toLocalPath(p.Path)) + f, err := filepath.Abs(s.toLocalPath(p.Path)) f = cleanPath(f) rpkt = &sshFxpNamePacket{ ID: p.ID, @@ -256,13 +267,14 @@ func handlePacket(s *Server, p orderedRequest) error { rpkt = statusFromError(p.ID, err) } case *sshFxpOpendirPacket: - p.Path = toLocalPath(p.Path) + lp := s.toLocalPath(p.Path) - if stat, err := os.Stat(p.Path); err != nil { + if stat, err := os.Stat(lp); err != nil { rpkt = statusFromError(p.ID, err) } else if !stat.IsDir() { rpkt = statusFromError(p.ID, &os.PathError{ - Path: p.Path, Err: syscall.ENOTDIR}) + Path: lp, Err: syscall.ENOTDIR, + }) } else { rpkt = (&sshFxpOpenPacket{ ID: p.ID, @@ -315,7 +327,7 @@ func handlePacket(s *Server, p orderedRequest) error { } // Serve serves SFTP connections until the streams stop or the SFTP subsystem -// is stopped. +// is stopped. It returns nil if the server exits cleanly. func (svr *Server) Serve() error { defer func() { if svr.pktMgr.alloc != nil { @@ -341,6 +353,10 @@ func (svr *Server) Serve() error { for { pktType, pktBytes, err = svr.serverConn.recvPacket(svr.pktMgr.getNextOrderID()) if err != nil { + // Check whether the connection terminated cleanly in-between packets. 
+ if err == io.EOF { + err = nil + } // we don't care about releasing allocated pages here, the server will quit and the allocator freed break } @@ -446,7 +462,7 @@ func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket { osFlags |= os.O_EXCL } - f, err := os.OpenFile(toLocalPath(p.Path), osFlags, 0644) + f, err := os.OpenFile(svr.toLocalPath(p.Path), osFlags, 0o644) if err != nil { return statusFromError(p.ID, err) } @@ -484,7 +500,7 @@ func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket { b := p.Attrs.([]byte) var err error - p.Path = toLocalPath(p.Path) + p.Path = svr.toLocalPath(p.Path) debug("setstat name \"%s\"", p.Path) if (p.Flags & sshFileXferAttrSize) != 0 { @@ -603,13 +619,15 @@ func statusFromError(id uint32, err error) *sshFxpStatusPacket { return ret } - switch e := err.(type) { - case fxerr: + if errors.Is(err, io.EOF) { + ret.StatusError.Code = sshFxEOF + return ret + } + + var e fxerr + if errors.As(err, &e) { ret.StatusError.Code = uint32(e) - default: - if e == io.EOF { - ret.StatusError.Code = sshFxEOF - } + return ret } return ret diff --git a/vendor/github.com/pkg/sftp/server_plan9.go b/vendor/github.com/pkg/sftp/server_plan9.go new file mode 100644 index 0000000000..4e8ed06789 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_plan9.go @@ -0,0 +1,27 @@ +package sftp + +import ( + "path" + "path/filepath" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp[1:] + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/#s/boot" to "#s/boot" + return tmp + } + } + + return lp +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go index 94b6d832cd..a5470798cf 100644 --- a/vendor/github.com/pkg/sftp/server_statvfs_impl.go +++ b/vendor/github.com/pkg/sftp/server_statvfs_impl.go @@ -1,3 +1,4 @@ +//go:build darwin || linux // +build darwin linux // fill in statvfs structure with OS specific values diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go index 1d180d47c9..615c4157a5 100644 --- a/vendor/github.com/pkg/sftp/server_statvfs_linux.go +++ b/vendor/github.com/pkg/sftp/server_statvfs_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sftp diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go index fbf49068f9..dd4705bb4f 100644 --- a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go +++ b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go @@ -1,3 +1,4 @@ +//go:build !darwin && !linux && !plan9 // +build !darwin,!linux,!plan9 package sftp diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go new file mode 100644 index 0000000000..495b397c06 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_unix.go @@ -0,0 +1,16 @@ +//go:build !windows && !plan9 +// +build !windows,!plan9 + +package sftp + +import ( + "path" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + return p +} diff --git a/vendor/github.com/pkg/sftp/server_windows.go b/vendor/github.com/pkg/sftp/server_windows.go new file mode 100644 index 0000000000..b35be7305b --- /dev/null +++ 
b/vendor/github.com/pkg/sftp/server_windows.go @@ -0,0 +1,39 @@ +package sftp + +import ( + "path" + "path/filepath" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp + for len(tmp) > 0 && tmp[0] == '\\' { + tmp = tmp[1:] + } + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/C:/Windows" to "C:\\Windows" + return tmp + } + + tmp += "\\" + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes but with extra end slash is absolute, + // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end. + // e.g. "/C:" to "C:\\" + return tmp + } + } + + return lp +} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go index 9a63c39dc4..778c8f3d73 100644 --- a/vendor/github.com/pkg/sftp/sftp.go +++ b/vendor/github.com/pkg/sftp/sftp.go @@ -1,5 +1,5 @@ // Package sftp implements the SSH File Transfer Protocol as described in -// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt package sftp import ( diff --git a/vendor/github.com/pkg/sftp/syscall_fixed.go b/vendor/github.com/pkg/sftp/syscall_fixed.go index d404577765..e84430830e 100644 --- a/vendor/github.com/pkg/sftp/syscall_fixed.go +++ b/vendor/github.com/pkg/sftp/syscall_fixed.go @@ -1,3 +1,4 @@ +//go:build plan9 || windows || (js && wasm) // +build plan9 windows js,wasm // Go defines S_IFMT on windows, plan9 and js/wasm as 0x1f000 instead of diff --git a/vendor/github.com/pkg/sftp/syscall_good.go b/vendor/github.com/pkg/sftp/syscall_good.go index 4c2b240cf7..50052189e2 100644 --- a/vendor/github.com/pkg/sftp/syscall_good.go +++ b/vendor/github.com/pkg/sftp/syscall_good.go @@ -1,4 +1,6 @@ -// +build !plan9,!windows +//go:build !plan9 && !windows && (!js || !wasm) +// +build !plan9 +// +build !windows // +build !js !wasm package sftp diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index b042c896f2..d1d4a85fd7 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...). This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). +limited by the interface). I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. There doesn't seem to be a @@ -43,7 +43,7 @@ plain text): With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: -```json +```text {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} @@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. 
You can validate this in your -environment via benchmarks: +environment via benchmarks: ``` go test -bench=.*CallerTracing ``` @@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel) It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + #### Entries Besides the fields added with `WithField` or `WithFields` some fields are diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 72e8e3a1b6..7e7703c723 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "runtime" + "strings" ) // Writer at INFO level. See WriterLevel for details. @@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } +// Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace @@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { printFunc = entry.Print } + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } +// writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := 64 * 1024 // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) > chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return len(data), data, nil } + + //Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } + + // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } + + // Close the reader when we are done reader.Close() } +// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() } diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go index 9ba6e10a4a..b419c761ed 100644 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -49,7 +49,8 @@ var supportedKexAlgos = []string{ // P384 and P521 are not constant-time yet, but since we don't // reuse ephemeral keys, using them for ECDH should be OK. kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, + kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1, + kexAlgoDH1SHA1, } // serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden @@ -59,8 +60,9 @@ var serverForbiddenKexAlgos = map[string]struct{}{ kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests } -// preferredKexAlgos specifies the default preference for key-exchange algorithms -// in preference order. +// preferredKexAlgos specifies the default preference for key-exchange +// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm +// is disabled by default because it is a bit slower than the others. var preferredKexAlgos = []string{ kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, @@ -70,12 +72,12 @@ var preferredKexAlgos = []string{ // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // of authenticating servers) in preference order. 
var supportedHostKeyAlgos = []string{ - CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, + CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSASHA512, KeyAlgoRSASHA256, + KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA, KeyAlgoDSA, KeyAlgoED25519, @@ -85,7 +87,7 @@ var supportedHostKeyAlgos = []string{ // This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed // because they have reached the end of their useful life. var supportedMACs = []string{ - "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", } var supportedCompressions = []string{compressionNone} @@ -119,6 +121,13 @@ func algorithmsForKeyFormat(keyFormat string) []string { } } +// isRSA returns whether algo is a supported RSA algorithm, including certificate +// algorithms. +func isRSA(algo string) bool { + algos := algorithmsForKeyFormat(KeyAlgoRSA) + return contains(algos, underlyingAlgo(algo)) +} + // supportedPubKeyAuthAlgos specifies the supported client public key // authentication algorithms. Note that this doesn't include certificate types // since those use the underlying algorithm. This list is sent to the client if @@ -262,16 +271,16 @@ type Config struct { // unspecified, a size suitable for the chosen cipher is used. RekeyThreshold uint64 - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. + // The allowed key exchanges algorithms. If unspecified then a default set + // of algorithms is used. Unsupported values are silently ignored. KeyExchanges []string - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. + // The allowed cipher algorithms. If unspecified then a sensible default is + // used. Unsupported values are silently ignored. Ciphers []string - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. + // The allowed MAC algorithms. If unspecified then a sensible default is + // used. Unsupported values are silently ignored. MACs []string } @@ -288,7 +297,7 @@ func (c *Config) SetDefaults() { var ciphers []string for _, c := range c.Ciphers { if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition + // Ignore the cipher if we have no cipherModes definition. ciphers = append(ciphers, c) } } @@ -297,10 +306,26 @@ func (c *Config) SetDefaults() { if c.KeyExchanges == nil { c.KeyExchanges = preferredKexAlgos } + var kexs []string + for _, k := range c.KeyExchanges { + if kexAlgoMap[k] != nil { + // Ignore the KEX if we have no kexAlgoMap definition. + kexs = append(kexs, k) + } + } + c.KeyExchanges = kexs if c.MACs == nil { c.MACs = supportedMACs } + var macs []string + for _, m := range c.MACs { + if macModes[m] != nil { + // Ignore the MAC if we have no macModes definition. 
+ macs = append(macs, m) + } + } + c.MACs = macs if c.RekeyThreshold == 0 { // cipher specific default diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 927a90cd46..8a05f79902 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -23,6 +23,7 @@ const ( kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" kexAlgoECDH256 = "ecdh-sha2-nistp256" kexAlgoECDH384 = "ecdh-sha2-nistp384" kexAlgoECDH521 = "ecdh-sha2-nistp521" @@ -430,6 +431,17 @@ func init() { hashFunc: crypto.SHA256, } + // This is the group called diffie-hellman-group16-sha512 in RFC + // 8268 and Oakley Group 16 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH16SHA512] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA512, + } + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 9e3870292f..b21322affa 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -370,6 +370,25 @@ func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *c return authErr, perms, nil } +// isAlgoCompatible checks if the signature format is compatible with the +// selected algorithm taking into account edge cases that occur with old +// clients. +func isAlgoCompatible(algo, sigFormat string) bool { + // Compatibility for old clients. + // + // For certificate authentication with OpenSSH 7.2-7.7 signature format can + // be rsa-sha2-256 or rsa-sha2-512 for the algorithm + // ssh-rsa-cert-v01@openssh.com. + // + // With gpg-agent < 2.2.6 the algorithm can be rsa-sha2-256 or rsa-sha2-512 + // for signature format ssh-rsa. + if isRSA(algo) && isRSA(sigFormat) { + return true + } + // Standard case: the underlying algorithm must match the signature format. + return underlyingAlgo(algo) == sigFormat +} + // ServerAuthError represents server authentication errors and is // sometimes returned by NewServerConn. 
It appends any authentication // errors that may occur, and is returned if all of the authentication @@ -567,7 +586,7 @@ userAuthLoop: authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } - if underlyingAlgo(algo) != sig.Format { + if !isAlgoCompatible(algo, sig.Format) { authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) break } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index b9632380e7..b0d482f9f4 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -19,6 +19,7 @@ import ( "io/fs" "log" "math" + "math/bits" mathrand "math/rand" "net" "net/http" @@ -518,11 +519,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { func authorityAddr(scheme string, authority string) (addr string) { host, port, err := net.SplitHostPort(authority) if err != nil { // authority didn't have a port + host = authority + port = "" + } + if port == "" { // authority's port was empty port = "443" if scheme == "http" { port = "80" } - host = authority } if a, err := idna.ToASCII(host); err == nil { host = a @@ -1677,7 +1681,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int { return int(n) // doesn't truncate; max is 512K } -var bufPool sync.Pool // of *[]byte +// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running +// streaming requests using small frame sizes occupy large buffers initially allocated for prior +// requests needing big buffers. The size ranges are as follows: +// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB], +// {256 KB, 512 KB], {512 KB, infinity} +// In practice, the maximum scratch buffer size should not exceed 512 KB due to +// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used. +// It exists mainly as a safety measure, for potential future increases in max buffer size. +var bufPools [7]sync.Pool // of *[]byte +func bufPoolIndex(size int) int { + if size <= 16384 { + return 0 + } + size -= 1 + bits := bits.Len(uint(size)) + index := bits - 14 + if index >= len(bufPools) { + return len(bufPools) - 1 + } + return index +} func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { cc := cs.cc @@ -1695,12 +1719,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) { // Scratch buffer for reading into & writing from. scratchLen := cs.frameScratchBufferLen(maxFrameSize) var buf []byte - if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen { - defer bufPool.Put(bp) + index := bufPoolIndex(scratchLen) + if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen { + defer bufPools[index].Put(bp) buf = *bp } else { buf = make([]byte, scratchLen) - defer bufPool.Put(&buf) + defer bufPools[index].Put(&buf) } var sawEOF bool diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 16c6c6b90c..e61587945b 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). 
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index a7e27b3d29..9c79aa0a0c 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !appengine -// +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index e1755d1d9a..d28140f789 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine package internal diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 0c4d14929a..8f775fafa6 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -624,7 +624,7 @@ ccflags="$@" $2 ~ /^MEM/ || $2 ~ /^WG/ || $2 ~ /^FIB_RULE_/ || - $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} + $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE|IOMIN$|IOOPT$|ALIGNOFF$|DISCARD|ROTATIONAL$|ZEROOUT$|GETDISKSEQ$)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go new file mode 100644 index 0000000000..ca0513632e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +// +build aix darwin dragonfly freebsd openbsd solaris + +package unix + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index 86213c05d6..fa93d0aa90 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build linux -// +build linux +//go:build linux || netbsd +// +build linux netbsd package unix @@ -14,8 +14,17 @@ type mremapMmapper struct { mremap func(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) } +var mapper = &mremapMmapper{ + mmapper: mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, + }, + mremap: mremap, +} + func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { - if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&MREMAP_FIXED != 0 { + if newLength <= 0 || len(oldData) == 0 || len(oldData) != cap(oldData) || flags&mremapFixed != 0 { return nil, EINVAL } @@ -32,9 +41,13 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ } bNew := unsafe.Slice((*byte)(unsafe.Pointer(newAddr)), newLength) pNew := &bNew[cap(bNew)-1] - if flags&MREMAP_DONTUNMAP == 0 { + if flags&mremapDontunmap == 0 { delete(m.active, pOld) } m.active[pNew] = bNew return bNew, nil } + +func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { + return mapper.Mremap(oldData, newLength, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index c406ae00f4..9a6e5acacb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -535,21 +535,6 @@ func Fsync(fd int) error { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg //sys munmap(addr uintptr, length uintptr) (err error) - -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 7705c3270b..4217de518b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -601,20 +601,6 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { // Gethostuuid(uuid *byte, timeout *Timespec) (err error) // Ptrace(req int, pid int, addr uintptr, data int) (ret uintptr, err error) -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - //sys Madvise(b []byte, behav int) (err error) //sys Mlock(b []byte) (err error) //sys Mlockall(flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 206921504c..135cc3cd75 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -510,30 +510,36 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { return nil, err } - // Find size. 
- n := uintptr(0) - if err := sysctl(mib, nil, &n, nil, 0); err != nil { - return nil, err - } - if n == 0 { - return nil, nil - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } + for { + // Find size. + n := uintptr(0) + if err := sysctl(mib, nil, &n, nil, 0); err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } - // Read into buffer of that size. - buf := make([]KinfoProc, n/SizeofKinfoProc) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { - return nil, err - } - if n%SizeofKinfoProc != 0 { - return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) - } + // Read into buffer of that size. + buf := make([]KinfoProc, n/SizeofKinfoProc) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&buf[0])), &n, nil, 0); err != nil { + if err == ENOMEM { + // Process table grew. Try again. + continue + } + return nil, err + } + if n%SizeofKinfoProc != 0 { + return nil, fmt.Errorf("sysctl() returned a size of %d, which is not a multiple of %d", n, SizeofKinfoProc) + } - // The actual call may return less than the original reported required - // size so ensure we deal with that. - return buf[:n/SizeofKinfoProc], nil + // The actual call may return less than the original reported required + // size so ensure we deal with that. + return buf[:n/SizeofKinfoProc], nil + } } //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 39de5f1430..a730878e49 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1885,7 +1885,7 @@ func Getpgrp() (pid int) { //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) -//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 +//sys pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) //sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) @@ -2125,28 +2125,6 @@ func writevRacedetect(iovecs []Iovec, n int) { // mmap varies by architecture; see syscall_linux_*.go. 
//sys munmap(addr uintptr, length uintptr) (err error) //sys mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (xaddr uintptr, err error) - -var mapper = &mremapMmapper{ - mmapper: mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, - }, - mremap: mremap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - -func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { - return mapper.Mremap(oldData, newLength, flags) -} - //sys Madvise(b []byte, advice int) (err error) //sys Mprotect(b []byte, prot int) (err error) //sys Mlock(b []byte) (err error) @@ -2155,6 +2133,12 @@ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { //sys Munlock(b []byte) (err error) //sys Munlockall() (err error) +const ( + mremapFixed = MREMAP_FIXED + mremapDontunmap = MREMAP_DONTUNMAP + mremapMaymove = MREMAP_MAYMOVE +) + // Vmsplice splices user pages from a slice of Iovecs into a pipe specified by fd, // using the specified flags. func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { @@ -2454,6 +2438,39 @@ func Getresgid() (rgid, egid, sgid int) { return int(r), int(e), int(s) } +// Pselect is a wrapper around the Linux pselect6 system call. +// This version does not modify the timeout argument. +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + // Per https://man7.org/linux/man-pages/man2/select.2.html#NOTES, + // The Linux pselect6() system call modifies its timeout argument. + // [Not modifying the argument] is the behavior required by POSIX.1-2001. + var mutableTimeout *Timespec + if timeout != nil { + mutableTimeout = new(Timespec) + *mutableTimeout = *timeout + } + + // The final argument of the pselect6() system call is not a + // sigset_t * pointer, but is instead a structure + var kernelMask *sigset_argpack + if sigmask != nil { + wordBits := 32 << (^uintptr(0) >> 63) // see math.intSize + + // A sigset stores one bit per signal, + // offset by 1 (because signal 0 does not exist). + // So the number of words needed is ⌈__C_NSIG - 1 / wordBits⌉. 
+ sigsetWords := (_C__NSIG - 1 + wordBits - 1) / (wordBits) + + sigsetBytes := uintptr(sigsetWords * (wordBits / 8)) + kernelMask = &sigset_argpack{ + ss: sigmask, + ssLen: sigsetBytes, + } + } + + return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 5b21fcfd75..70601ce369 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -40,7 +40,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index a81f5742b8..f5266689af 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -33,7 +33,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 69d2d7c3db..f6ab02ec15 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -28,7 +28,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 76d564095e..93fe59d25d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -31,7 +31,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 35851ef70b..5e6ceee129 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -32,7 +32,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err if timeout != nil { ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} } - return Pselect(nfd, r, w, e, ts, nil) + return pselect6(nfd, r, w, e, ts, nil) } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) @@ -177,3 +177,14 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, 
initrdFd, cmdlineLen, cmdline, flags) } + +//sys riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) + +func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) { + var setSize uintptr + + if set != nil { + setSize = uintptr(unsafe.Sizeof(*set)) + } + return riscvHWProbe(pairs, setSize, set, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 018d7d4782..ddd1ac8534 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -360,6 +360,18 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +const ( + mremapFixed = MAP_FIXED + mremapDontunmap = 0 + mremapMaymove = 0 +) + +//sys mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) = SYS_MREMAP + +func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) { + return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags) +} + /* * Unimplemented */ @@ -564,7 +576,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { // mq_timedreceive // mq_timedsend // mq_unlink -// mremap // msgget // msgrcv // msgsnd diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b600a289d3..72d23575fa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -716,20 +716,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { return } -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} - // Event Ports type fileObjCookie struct { diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8e48c29ec3..8bb30e7ce3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -147,6 +147,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index d3d49ec3ed..44e72edb42 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -285,25 +285,11 @@ func Close(fd int) (err error) { return } -var mapper = &mmapper{ - active: make(map[*byte][]byte), - mmap: mmap, - munmap: munmap, -} - // Dummy function: there are no semantics for Madvise on z/OS func Madvise(b []byte, advice int) (err error) { return } -func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return mapper.Mmap(fd, offset, length, prot, flags) -} - -func Munmap(b []byte) (err error) { - return mapper.Munmap(b) -} 
- //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index a46df0f1e5..cfb1430018 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6cd4a3ea9d..df64f2d590 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c7ebee24df..3025cd5b2d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80041272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 12a9a1389e..09e1ffbef9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET 
= 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index f26a164f4a..a457235407 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 890bc3c9b7..fee7dfb819 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 549f26ac64..a5b2373aea 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index e0365e32c1..5dde82c98a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 
B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index fdccce15ca..2e80ea6b33 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index b2205c83fa..a65dcd7cbe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40041272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 81aa5ad0f6..cbd34e3d89 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d 
BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 76807a1fd4..e4afa7a317 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -27,22 +27,31 @@ const ( B57600 = 0x10 B576000 = 0x15 B921600 = 0x16 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1f BS1 = 0x8000 BSDLY = 0x8000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d4a5ab9e4e..44f45a039d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 66e65db951..74733e260f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -27,22 +27,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x127a BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 + BLKDISCARD = 0x1277 + BLKDISCARDZEROES = 0x127c BLKFLSBUF = 0x1261 BLKFRAGET = 0x1265 BLKFRASET = 0x1264 + BLKGETDISKSEQ = 0x80081280 BLKGETSIZE = 0x1260 BLKGETSIZE64 = 0x80081272 + BLKIOMIN = 0x1278 + BLKIOOPT = 0x1279 BLKPBSZGET = 0x127b BLKRAGET = 0x1263 BLKRASET = 0x1262 BLKROGET = 0x125e BLKROSET = 0x125d + BLKROTATIONAL = 0x127e BLKRRPART = 0x125f + BLKSECDISCARD = 0x127d BLKSECTGET = 0x1267 BLKSECTSET = 0x1266 BLKSSZGET = 0x1268 + BLKZEROOUT = 0x127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 48984202c6..f5f3934b1a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -30,22 +30,31 @@ const ( B57600 = 0x1001 B576000 = 0x1006 B921600 = 0x1007 + BLKALIGNOFF = 0x2000127a BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 + BLKDISCARD = 0x20001277 + BLKDISCARDZEROES = 0x2000127c BLKFLSBUF = 0x20001261 BLKFRAGET = 0x20001265 BLKFRASET = 
0x20001264 + BLKGETDISKSEQ = 0x40081280 BLKGETSIZE = 0x20001260 BLKGETSIZE64 = 0x40081272 + BLKIOMIN = 0x20001278 + BLKIOOPT = 0x20001279 BLKPBSZGET = 0x2000127b BLKRAGET = 0x20001263 BLKRASET = 0x20001262 BLKROGET = 0x2000125e BLKROSET = 0x2000125d + BLKROTATIONAL = 0x2000127e BLKRRPART = 0x2000125f + BLKSECDISCARD = 0x2000127d BLKSECTGET = 0x20001267 BLKSECTSET = 0x20001266 BLKSSZGET = 0x20001268 + BLKZEROOUT = 0x2000127f BOTHER = 0x1000 BS1 = 0x2000 BSDLY = 0x2000 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 7ceec233fb..a07321bed9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1356,7 +1356,7 @@ func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { +func pselect6(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *sigset_argpack) (n int, err error) { r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) n = int(r0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0b29239583..0ab4f2ed72 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -531,3 +531,19 @@ func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, f } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func riscvHWProbe(pairs []RISCVHWProbePairs, cpuCount uintptr, cpus *CPUSet, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(pairs) > 0 { + _p0 = unsafe.Pointer(&pairs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_RISCV_HWPROBE, uintptr(_p0), uintptr(len(pairs)), uintptr(cpuCount), uintptr(unsafe.Pointer(cpus)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index cdb2af5ae0..35f499b32a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 9d25f76b0b..3cda65b0da 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize 
uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index d3f8035169..1e1fea902b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 887188a529..3b77da1107 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1858,3 +1858,14 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mremapNetBSD(oldp uintptr, oldsize uintptr, newp uintptr, newsize uintptr, flags int) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MREMAP, uintptr(oldp), uintptr(oldsize), uintptr(newp), uintptr(newsize), uintptr(flags), 0) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 3e594a8c09..ef285c567b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -251,6 +251,8 @@ const ( SYS_ACCEPT4 = 242 SYS_RECVMMSG = 243 SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_RISCV_HWPROBE = 258 + SYS_RISCV_FLUSH_ICACHE = 259 SYS_WAIT4 = 260 SYS_PRLIMIT64 = 261 SYS_FANOTIFY_INIT = 262 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 02e2462c8f..26ef52aafc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -866,6 +866,11 @@ const ( POLLNVAL = 0x20 ) +type sigset_argpack struct { + ss *Sigset_t + ssLen uintptr +} + type SignalfdSiginfo struct { Signo uint32 Errno int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 9ea54b7b86..83c69c119f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -718,3 +718,26 @@ type SysvShmDesc struct { _ uint64 _ uint64 } + +type RISCVHWProbePairs struct { + Key int64 + Value uint64 +} + +const ( + RISCV_HWPROBE_KEY_MVENDORID = 0x0 + RISCV_HWPROBE_KEY_MARCHID = 0x1 + RISCV_HWPROBE_KEY_MIMPID = 0x2 + RISCV_HWPROBE_KEY_BASE_BEHAVIOR = 0x3 + RISCV_HWPROBE_BASE_BEHAVIOR_IMA = 0x1 + RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4 + RISCV_HWPROBE_IMA_FD = 0x1 + RISCV_HWPROBE_IMA_C = 0x2 + RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 + RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 + RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 + RISCV_HWPROBE_MISALIGNED_SLOW = 0x2 + 
RISCV_HWPROBE_MISALIGNED_FAST = 0x3 + RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 + RISCV_HWPROBE_MISALIGNED_MASK = 0x7 +) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 9645900754..373d16388a 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -135,14 +135,14 @@ func Getpagesize() int { return 4096 } // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallback(fn interface{}) uintptr { return syscall.NewCallback(fn) } // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallbackCDecl(fn interface{}) uintptr { return syscall.NewCallbackCDecl(fn) } diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json index 2b26e29a55..4198b242d2 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json @@ -42183,7 +42183,7 @@ } } }, - "revision": "20230720", + "revision": "20230725", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -65063,6 +65063,10 @@ "description": "[Output Only] The URL of the instance. The URL can exist even if the instance has not yet been created.", "type": "string" }, + "instanceFlexibilityOverride": { + "$ref": "ManagedInstanceInstanceFlexibilityOverride", + "description": "[Output Only] The overrides to instance properties resulting from InstanceFlexibilityPolicy." + }, "instanceHealth": { "description": "[Output Only] Health state of the instance per health-check.", "items": { @@ -65159,6 +65163,28 @@ }, "type": "object" }, + "ManagedInstanceInstanceFlexibilityOverride": { + "id": "ManagedInstanceInstanceFlexibilityOverride", + "properties": { + "machineType": { + "description": "The machine type to be used for this instance.", + "type": "string" + }, + "provisioningModel": { + "description": "The provisioning model to be used for this instance.", + "enum": [ + "SPOT", + "STANDARD" + ], + "enumDescriptions": [ + "Heavily discounted, no guaranteed runtime.", + "Standard provisioning with user controlled runtime, no discounts." + ], + "type": "string" + } + }, + "type": "object" + }, "ManagedInstanceInstanceHealth": { "id": "ManagedInstanceInstanceHealth", "properties": { @@ -70600,7 +70626,7 @@ "type": "object" }, "Operation": { - "description": "Represents an Operation resource. 
Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/alpha/globalOperations) * [Regional](/compute/docs/reference/rest/alpha/regionOperations) * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zonalOperations` resource. For more information, read Global, Regional, and Zonal Resources.", + "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/alpha/globalOperations) * [Regional](/compute/docs/reference/rest/alpha/regionOperations) * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources.", "id": "Operation", "properties": { "clientOperationId": { diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index 221ffcbd11..e89007c08e 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -34327,6 +34327,10 @@ type ManagedInstance struct { // even if the instance has not yet been created. Instance string `json:"instance,omitempty"` + // InstanceFlexibilityOverride: [Output Only] The overrides to instance + // properties resulting from InstanceFlexibilityPolicy. + InstanceFlexibilityOverride *ManagedInstanceInstanceFlexibilityOverride `json:"instanceFlexibilityOverride,omitempty"` + // InstanceHealth: [Output Only] Health state of the instance per // health-check. InstanceHealth []*ManagedInstanceInstanceHealth `json:"instanceHealth,omitempty"` @@ -34447,6 +34451,42 @@ func (s *ManagedInstanceAllInstancesConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type ManagedInstanceInstanceFlexibilityOverride struct { + // MachineType: The machine type to be used for this instance. + MachineType string `json:"machineType,omitempty"` + + // ProvisioningModel: The provisioning model to be used for this + // instance. + // + // Possible values: + // "SPOT" - Heavily discounted, no guaranteed runtime. + // "STANDARD" - Standard provisioning with user controlled runtime, no + // discounts. + ProvisioningModel string `json:"provisioningModel,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MachineType") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"MachineType") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedInstanceInstanceFlexibilityOverride) MarshalJSON() ([]byte, error) { + type NoMethod ManagedInstanceInstanceFlexibilityOverride + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type ManagedInstanceInstanceHealth struct { // DetailedHealthState: [Output Only] The current detailed instance // health state. @@ -41873,7 +41913,7 @@ func (s *NotificationEndpointsScopedListWarningData) MarshalJSON() ([]byte, erro // regional or zonal. - For global operations, use the // `globalOperations` resource. - For regional operations, use the // `regionOperations` resource. - For zonal operations, use the -// `zonalOperations` resource. For more information, read Global, +// `zoneOperations` resource. For more information, read Global, // Regional, and Zonal Resources. type Operation struct { // ClientOperationId: [Output Only] The value of `requestId` if you diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json index e8a2fe1d3c..34cdf25f6e 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json @@ -15190,6 +15190,56 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "patch": { + "description": "Patches the specified NetworkAttachment resource with the data included in the request. This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + "httpMethod": "PATCH", + "id": "compute.networkAttachments.patch", + "parameterOrder": [ + "project", + "region", + "networkAttachment" + ], + "parameters": { + "networkAttachment": { + "description": "Name of the NetworkAttachment resource to patch.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + "request": { + "$ref": "NetworkAttachment" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setIamPolicy": { "description": "Sets the access control policy on the specified resource. Replaces any existing policy.", "flatPath": "projects/{project}/regions/{region}/networkAttachments/{resource}/setIamPolicy", @@ -30699,6 +30749,53 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getNatIpInfo": { + "description": "Retrieves runtime NAT IP information.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + "httpMethod": "GET", + "id": "compute.routers.getNatIpInfo", + "parameterOrder": [ + "project", + "region", + "router" + ], + "parameters": { + "natName": { + "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "router": { + "description": "Name of the Router resource to query for Nat IP information. 
The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + "response": { + "$ref": "NatIpInfoResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getNatMappingInfo": { "description": "Retrieves runtime Nat mapping information of VM endpoints.", "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", @@ -32356,6 +32453,77 @@ } } }, + "snapshotSettings": { + "methods": { + "get": { + "description": "Get snapshot settings.", + "flatPath": "projects/{project}/global/snapshotSettings", + "httpMethod": "GET", + "id": "compute.snapshotSettings.get", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/snapshotSettings", + "response": { + "$ref": "SnapshotSettings" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "description": "Patch snapshot settings.", + "flatPath": "projects/{project}/global/snapshotSettings", + "httpMethod": "PATCH", + "id": "compute.snapshotSettings.patch", + "parameterOrder": [ + "project" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "update_mask indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/snapshotSettings", + "request": { + "$ref": "SnapshotSettings" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "snapshots": { "methods": { "delete": { @@ -38620,7 +38788,7 @@ } } }, - "revision": "20230711", + "revision": "20230725", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -40234,7 +40402,7 @@ "type": "string" }, "replicaZones": { - "description": "Required for each regional disk associated with the instance. 
Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", + "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone.", "items": { "type": "string" }, @@ -43271,9 +43439,11 @@ "description": "The type of commitment, which affects the discount rate and the eligible resources. Type MEMORY_OPTIMIZED specifies a commitment that will only apply to memory optimized machines. Type ACCELERATOR_OPTIMIZED specifies a commitment that will only apply to accelerator optimized machines.", "enum": [ "ACCELERATOR_OPTIMIZED", + "ACCELERATOR_OPTIMIZED_A3", "COMPUTE_OPTIMIZED", "COMPUTE_OPTIMIZED_C2D", "COMPUTE_OPTIMIZED_C3", + "COMPUTE_OPTIMIZED_H3", "GENERAL_PURPOSE", "GENERAL_PURPOSE_E2", "GENERAL_PURPOSE_N2", @@ -43297,6 +43467,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -47094,7 +47266,7 @@ "type": "string" }, "noAutomateDnsZone": { - "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.", + "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field. Once set, this field is not mutable.", "type": "boolean" }, "portRange": { @@ -47168,7 +47340,7 @@ "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. ", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. The target is not mutable once set as a service attachment. 
", "type": "string" } }, @@ -58999,6 +59171,72 @@ }, "type": "object" }, + "NatIpInfo": { + "description": "Contains NAT IP information of a NAT config (i.e. usage status, mode).", + "id": "NatIpInfo", + "properties": { + "natIpInfoMappings": { + "description": "A list of all NAT IPs assigned to this NAT config.", + "items": { + "$ref": "NatIpInfoNatIpInfoMapping" + }, + "type": "array" + }, + "natName": { + "description": "Name of the NAT config which the NAT IP belongs to.", + "type": "string" + } + }, + "type": "object" + }, + "NatIpInfoNatIpInfoMapping": { + "description": "Contains information of a NAT IP.", + "id": "NatIpInfoNatIpInfoMapping", + "properties": { + "mode": { + "description": "Specifies whether NAT IP is auto or manual.", + "enum": [ + "AUTO", + "MANUAL" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "natIp": { + "description": "NAT IP address. For example: 203.0.113.11.", + "type": "string" + }, + "usage": { + "description": "Specifies whether NAT IP is currently serving at least one endpoint or not.", + "enum": [ + "IN_USE", + "UNUSED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "NatIpInfoResponse": { + "id": "NatIpInfoResponse", + "properties": { + "result": { + "description": "[Output Only] A list of NAT IP information.", + "items": { + "$ref": "NatIpInfo" + }, + "type": "array" + } + }, + "type": "object" + }, "Network": { "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", "id": "Network", @@ -63660,7 +63898,7 @@ "type": "object" }, "Operation": { - "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/beta/globalOperations) * [Regional](/compute/docs/reference/rest/beta/regionOperations) * [Zonal](/compute/docs/reference/rest/beta/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zonalOperations` resource. For more information, read Global, Regional, and Zonal Resources.", + "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/beta/globalOperations) * [Regional](/compute/docs/reference/rest/beta/regionOperations) * [Zonal](/compute/docs/reference/rest/beta/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. 
For more information, read Global, Regional, and Zonal Resources.", "id": "Operation", "properties": { "clientOperationId": { @@ -63778,6 +64016,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "setCommonInstanceMetadataOperationMetadata": { + "$ref": "SetCommonInstanceMetadataOperationMetadata", + "description": "[Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state." + }, "startTime": { "description": "[Output Only] The time that this operation was started by the server. This value is in RFC3339 text format.", "type": "string" @@ -63810,7 +64052,7 @@ "type": "string" }, "user": { - "description": "[Output Only] User who requested the operation, for example: `user@example.com`.", + "description": "[Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.", "type": "string" }, "warnings": { @@ -74396,6 +74638,53 @@ }, "type": "object" }, + "SetCommonInstanceMetadataOperationMetadata": { + "id": "SetCommonInstanceMetadataOperationMetadata", + "properties": { + "clientOperationId": { + "description": "[Output Only] The client operation id.", + "type": "string" + }, + "perLocationOperations": { + "additionalProperties": { + "$ref": "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo" + }, + "description": "[Output Only] Status information per location (location name is key). Example key: zones/us-central1-a", + "type": "object" + } + }, + "type": "object" + }, + "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo": { + "id": "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "[Output Only] If state is `ABANDONED` or `FAILED`, this field is populated." + }, + "state": { + "description": "[Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.", + "enum": [ + "ABANDONED", + "DONE", + "FAILED", + "PROPAGATED", + "PROPAGATING", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "Operation not tracked in this location e.g. zone is marked as DOWN.", + "Operation has completed successfully.", + "Operation is in an error state.", + "Operation is confirmed to be in the location.", + "Operation is not yet confirmed to have been created in the location.", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "ShareSettings": { "description": "The share setting for reservations and sole tenancy node groups.", "id": "ShareSettings", @@ -74966,6 +75255,56 @@ }, "type": "object" }, + "SnapshotSettings": { + "id": "SnapshotSettings", + "properties": { + "storageLocation": { + "$ref": "SnapshotSettingsStorageLocationSettings", + "description": "Policy of which storage location is going to be resolved, and additional data that particularizes how the policy is going to be carried out." + } + }, + "type": "object" + }, + "SnapshotSettingsStorageLocationSettings": { + "id": "SnapshotSettingsStorageLocationSettings", + "properties": { + "locations": { + "additionalProperties": { + "$ref": "SnapshotSettingsStorageLocationSettingsStorageLocationPreference" + }, + "description": "When the policy is SPECIFIC_LOCATIONS, snapshots will be stored in the locations listed in this field. 
Keys are GCS bucket locations.", + "type": "object" + }, + "policy": { + "description": "The chosen location policy.", + "enum": [ + "LOCAL_REGION", + "NEAREST_MULTI_REGION", + "SPECIFIC_LOCATIONS", + "STORAGE_LOCATION_POLICY_UNSPECIFIED" + ], + "enumDescriptions": [ + "Store snapshot in the same region as with the originating disk. No additional parameters are needed.", + "Store snapshot to the nearest multi region GCS bucket, relative to the originating disk. No additional parameters are needed.", + "Store snapshot in the specific locations, as specified by the user. The list of regions to store must be defined under the `locations` field.", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "SnapshotSettingsStorageLocationSettingsStorageLocationPreference": { + "description": "A structure for specifying storage locations.", + "id": "SnapshotSettingsStorageLocationSettingsStorageLocationPreference", + "properties": { + "name": { + "description": "Name of the location. It should be one of the GCS buckets.", + "type": "string" + } + }, + "type": "object" + }, "SourceDiskEncryptionKey": { "id": "SourceDiskEncryptionKey", "properties": { @@ -76447,6 +76786,33 @@ }, "type": "object" }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "Status", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, "Subnetwork": { "description": "Represents a Subnetwork resource. A subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network.", "id": "Subnetwork", @@ -76548,6 +76914,7 @@ "purpose": { "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. 
Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "enum": [ + "GLOBAL_MANAGED_PROXY", "INTERNAL_HTTPS_LOAD_BALANCER", "PRIVATE", "PRIVATE_NAT", @@ -76556,6 +76923,7 @@ "REGIONAL_MANAGED_PROXY" ], "enumDescriptions": [ + "Subnet reserved for Global Internal HTTP(S) Load Balancing.", "Subnet reserved for Internal HTTP(S) Load Balancing.", "Regular user created or automatically created subnet.", "Subnetwork used as source range for Private NAT Gateways.", @@ -81823,6 +82191,7 @@ "purpose": { "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "enum": [ + "GLOBAL_MANAGED_PROXY", "INTERNAL_HTTPS_LOAD_BALANCER", "PRIVATE", "PRIVATE_NAT", @@ -81831,6 +82200,7 @@ "REGIONAL_MANAGED_PROXY" ], "enumDescriptions": [ + "Subnet reserved for Global Internal HTTP(S) Load Balancing.", "Subnet reserved for Internal HTTP(S) Load Balancing.", "Regular user created or automatically created subnet.", "Subnetwork used as source range for Private NAT Gateways.", diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go index 73d2223e18..60d4ef19e0 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go @@ -225,6 +225,7 @@ func New(client *http.Client) (*Service, error) { s.Routes = NewRoutesService(s) s.SecurityPolicies = NewSecurityPoliciesService(s) s.ServiceAttachments = NewServiceAttachmentsService(s) + s.SnapshotSettings = NewSnapshotSettingsService(s) s.Snapshots = NewSnapshotsService(s) s.SslCertificates = NewSslCertificatesService(s) s.SslPolicies = NewSslPoliciesService(s) @@ -410,6 +411,8 @@ type Service struct { ServiceAttachments *ServiceAttachmentsService + SnapshotSettings *SnapshotSettingsService + Snapshots *SnapshotsService SslCertificates *SslCertificatesService @@ -1172,6 +1175,15 @@ type ServiceAttachmentsService struct { s *Service } +func NewSnapshotSettingsService(s *Service) *SnapshotSettingsService { + rs := &SnapshotSettingsService{s: s} + return rs +} + +type SnapshotSettingsService struct { + s *Service +} + func NewSnapshotsService(s *Service) *SnapshotsService { rs := &SnapshotsService{s: s} return rs @@ -3491,8 +3503,7 @@ type AttachedDiskInitializeParams struct { // ReplicaZones: Required for each regional disk associated with the // instance. Specify the URLs of the zones where the disk should be // replicated to. 
You must provide exactly two replica zones, and one - // zone must be the same as the instance zone. You can't use this option - // with boot disks. + // zone must be the same as the instance zone. ReplicaZones []string `json:"replicaZones,omitempty"` // ResourceManagerTags: Resource manager tags to be bound to the disk. @@ -8204,9 +8215,11 @@ type Commitment struct { // // Possible values: // "ACCELERATOR_OPTIMIZED" + // "ACCELERATOR_OPTIMIZED_A3" // "COMPUTE_OPTIMIZED" // "COMPUTE_OPTIMIZED_C2D" // "COMPUTE_OPTIMIZED_C3" + // "COMPUTE_OPTIMIZED_H3" // "GENERAL_PURPOSE" // "GENERAL_PURPOSE_E2" // "GENERAL_PURPOSE_N2" @@ -13713,7 +13726,8 @@ type ForwardingRule struct { // NoAutomateDnsZone: This is used in PSC consumer ForwardingRule to // control whether it should try to auto-generate a DNS zone or not. - // Non-PSC forwarding rules do not use this field. + // Non-PSC forwarding rules do not use this field. Once set, this field + // is not mutable. NoAutomateDnsZone bool `json:"noAutomateDnsZone,omitempty"` // PortRange: This field can only be used: - If IPProtocol is one of @@ -13824,7 +13838,8 @@ type ForwardingRule struct { // vpc-sc - APIs that support VPC Service Controls. - all-apis - All // supported Google APIs. - For Private Service Connect forwarding rules // that forward traffic to managed services, the target must be a - // service attachment. + // service attachment. The target is not mutable once set as a service + // attachment. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -30942,6 +30957,113 @@ func (s *NamedPort) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NatIpInfo: Contains NAT IP information of a NAT config (i.e. usage +// status, mode). +type NatIpInfo struct { + // NatIpInfoMappings: A list of all NAT IPs assigned to this NAT config. + NatIpInfoMappings []*NatIpInfoNatIpInfoMapping `json:"natIpInfoMappings,omitempty"` + + // NatName: Name of the NAT config which the NAT IP belongs to. + NatName string `json:"natName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NatIpInfoMappings") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NatIpInfoMappings") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfo) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NatIpInfoNatIpInfoMapping: Contains information of a NAT IP. +type NatIpInfoNatIpInfoMapping struct { + // Mode: Specifies whether NAT IP is auto or manual. + // + // Possible values: + // "AUTO" + // "MANUAL" + Mode string `json:"mode,omitempty"` + + // NatIp: NAT IP address. For example: 203.0.113.11. 
+ NatIp string `json:"natIp,omitempty"` + + // Usage: Specifies whether NAT IP is currently serving at least one + // endpoint or not. + // + // Possible values: + // "IN_USE" + // "UNUSED" + Usage string `json:"usage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfoNatIpInfoMapping) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfoNatIpInfoMapping + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NatIpInfoResponse struct { + // Result: [Output Only] A list of NAT IP information. + Result []*NatIpInfo `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfoResponse) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfoResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Network: Represents a VPC Network resource. Networks connect // resources to each other and to the internet. For more information, // read Virtual Private Cloud (VPC) Network. @@ -37164,7 +37286,7 @@ func (s *NotificationEndpointListWarningData) MarshalJSON() ([]byte, error) { // regional or zonal. - For global operations, use the // `globalOperations` resource. - For regional operations, use the // `regionOperations` resource. - For zonal operations, use the -// `zonalOperations` resource. For more information, read Global, +// `zoneOperations` resource. For more information, read Global, // Regional, and Zonal Resources. 
type Operation struct { // ClientOperationId: [Output Only] The value of `requestId` if you @@ -37236,6 +37358,11 @@ type Operation struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SetCommonInstanceMetadataOperationMetadata: [Output Only] If the + // operation is for projects.setCommonInstanceMetadata, this field will + // contain information on all underlying zonal actions and their state. + SetCommonInstanceMetadataOperationMetadata *SetCommonInstanceMetadataOperationMetadata `json:"setCommonInstanceMetadataOperationMetadata,omitempty"` + // StartTime: [Output Only] The time that this operation was started by // the server. This value is in RFC3339 text format. StartTime string `json:"startTime,omitempty"` @@ -37263,7 +37390,8 @@ type Operation struct { TargetLink string `json:"targetLink,omitempty"` // User: [Output Only] User who requested the operation, for example: - // `user@example.com`. + // `user@example.com` or `alice_smith_identifier + // (global/workforcePools/example-com-us-employees)`. User string `json:"user,omitempty"` // Warnings: [Output Only] If warning messages are generated during @@ -51971,6 +52099,81 @@ func (s *ServiceAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SetCommonInstanceMetadataOperationMetadata struct { + // ClientOperationId: [Output Only] The client operation id. + ClientOperationId string `json:"clientOperationId,omitempty"` + + // PerLocationOperations: [Output Only] Status information per location + // (location name is key). Example key: zones/us-central1-a + PerLocationOperations map[string]SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo `json:"perLocationOperations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientOperationId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SetCommonInstanceMetadataOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod SetCommonInstanceMetadataOperationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo struct { + // Error: [Output Only] If state is `ABANDONED` or `FAILED`, this field + // is populated. + Error *Status `json:"error,omitempty"` + + // State: [Output Only] Status of the action, which can be one of the + // following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or + // `DONE`. + // + // Possible values: + // "ABANDONED" - Operation not tracked in this location e.g. zone is + // marked as DOWN. 
+ // "DONE" - Operation has completed successfully. + // "FAILED" - Operation is in an error state. + // "PROPAGATED" - Operation is confirmed to be in the location. + // "PROPAGATING" - Operation is not yet confirmed to have been created + // in the location. + // "UNSPECIFIED" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Error") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo) MarshalJSON() ([]byte, error) { + type NoMethod SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ShareSettings: The share setting for reservations and sole tenancy // node groups. type ShareSettings struct { @@ -52796,6 +52999,112 @@ func (s *SnapshotListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SnapshotSettings struct { + // StorageLocation: Policy of which storage location is going to be + // resolved, and additional data that particularizes how the policy is + // going to be carried out. + StorageLocation *SnapshotSettingsStorageLocationSettings `json:"storageLocation,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "StorageLocation") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StorageLocation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SnapshotSettings) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SnapshotSettingsStorageLocationSettings struct { + // Locations: When the policy is SPECIFIC_LOCATIONS, snapshots will be + // stored in the locations listed in this field. 
Keys are GCS bucket + // locations. + Locations map[string]SnapshotSettingsStorageLocationSettingsStorageLocationPreference `json:"locations,omitempty"` + + // Policy: The chosen location policy. + // + // Possible values: + // "LOCAL_REGION" - Store snapshot in the same region as with the + // originating disk. No additional parameters are needed. + // "NEAREST_MULTI_REGION" - Store snapshot to the nearest multi region + // GCS bucket, relative to the originating disk. No additional + // parameters are needed. + // "SPECIFIC_LOCATIONS" - Store snapshot in the specific locations, as + // specified by the user. The list of regions to store must be defined + // under the `locations` field. + // "STORAGE_LOCATION_POLICY_UNSPECIFIED" + Policy string `json:"policy,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Locations") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Locations") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SnapshotSettingsStorageLocationSettings) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotSettingsStorageLocationSettings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// SnapshotSettingsStorageLocationSettingsStorageLocationPreference: A +// structure for specifying storage locations. +type SnapshotSettingsStorageLocationSettingsStorageLocationPreference struct { + // Name: Name of the location. It should be one of the GCS buckets. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Name") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Name") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SnapshotSettingsStorageLocationSettingsStorageLocationPreference) MarshalJSON() ([]byte, error) { + type NoMethod SnapshotSettingsStorageLocationSettingsStorageLocationPreference + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SourceDiskEncryptionKey struct { // DiskEncryptionKey: The customer-supplied encryption key of the source // disk. Required if the source disk is protected by a customer-supplied @@ -54755,6 +55064,50 @@ func (s *StatefulPolicyPreservedStateNetworkIp) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Status: The `Status` type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the API Design Guide +// (https://cloud.google.com/apis/design/errors). +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type NoMethod Status + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Subnetwork: Represents a Subnetwork resource. A subnetwork (also // known as a subnet) is a logical partition of a Virtual Private Cloud // network with one primary IP range and zero or more secondary IP @@ -54902,6 +55255,8 @@ type Subnetwork struct { // purpose field is set to REGIONAL_MANAGED_PROXY. // // Possible values: + // "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Internal + // HTTP(S) Load Balancing. // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal // HTTP(S) Load Balancing. // "PRIVATE" - Regular user created or automatically created subnet. @@ -62286,6 +62641,8 @@ type UsableSubnetwork struct { // purpose field is set to REGIONAL_MANAGED_PROXY. 
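The SnapshotSettings types generated above are plain data carriers. A short sketch of how the v0.beta client could read the project-wide settings and assemble a SPECIFIC_LOCATIONS payload follows; the Get call and the struct fields come from this diff, while the project ID and bucket location are placeholders. The Patch call that would actually send the payload (with updateMask set to storageLocation, per the discovery document earlier in the diff) is only referenced in a comment because its generated wrapper is not reproduced in this hunk.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	const project = "my-project" // placeholder project ID

	// compute.snapshotSettings.get: read the current project-wide settings.
	settings, err := svc.SnapshotSettings.Get(project).Do()
	if err != nil {
		log.Fatal(err)
	}
	if sl := settings.StorageLocation; sl != nil {
		fmt.Println("current snapshot storage policy:", sl.Policy)
	}

	// A SPECIFIC_LOCATIONS payload built from the newly generated types. It
	// would be sent via compute.snapshotSettings.patch with an updateMask of
	// "storageLocation"; that Patch wrapper is not part of this hunk.
	_ = &compute.SnapshotSettings{
		StorageLocation: &compute.SnapshotSettingsStorageLocationSettings{
			Policy: "SPECIFIC_LOCATIONS",
			Locations: map[string]compute.SnapshotSettingsStorageLocationSettingsStorageLocationPreference{
				"us-central1": {Name: "us-central1"}, // placeholder location
			},
		},
	}
}
```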
// // Possible values: + // "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Internal + // HTTP(S) Load Balancing. // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal // HTTP(S) Load Balancing. // "PRIVATE" - Regular user created or automatically created subnet. @@ -129188,6 +129545,197 @@ func (c *NetworkAttachmentsListCall) Pages(ctx context.Context, f func(*NetworkA } } +// method id "compute.networkAttachments.patch": + +type NetworkAttachmentsPatchCall struct { + s *Service + project string + region string + networkAttachment string + networkattachment *NetworkAttachment + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches the specified NetworkAttachment resource with the data +// included in the request. This method supports PATCH semantics and +// uses JSON merge patch format and processing rules. +// +// - networkAttachment: Name of the NetworkAttachment resource to patch. +// - project: Project ID for this request. +// - region: Name of the region for this request. +func (r *NetworkAttachmentsService) Patch(project string, region string, networkAttachment string, networkattachment *NetworkAttachment) *NetworkAttachmentsPatchCall { + c := &NetworkAttachmentsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.networkAttachment = networkAttachment + c.networkattachment = networkattachment + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). end_interface: +// MixerMutationRequestBuilder +func (c *NetworkAttachmentsPatchCall) RequestId(requestId string) *NetworkAttachmentsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NetworkAttachmentsPatchCall) Fields(s ...googleapi.Field) *NetworkAttachmentsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NetworkAttachmentsPatchCall) Context(ctx context.Context) *NetworkAttachmentsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NetworkAttachmentsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NetworkAttachmentsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.networkattachment) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "networkAttachment": c.networkAttachment, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.networkAttachments.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NetworkAttachmentsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches the specified NetworkAttachment resource with the data included in the request. 
This method supports PATCH semantics and uses JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "httpMethod": "PATCH", + // "id": "compute.networkAttachments.patch", + // "parameterOrder": [ + // "project", + // "region", + // "networkAttachment" + // ], + // "parameters": { + // "networkAttachment": { + // "description": "Name of the NetworkAttachment resource to patch.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000). end_interface: MixerMutationRequestBuilder", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/networkAttachments/{networkAttachment}", + // "request": { + // "$ref": "NetworkAttachment" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.networkAttachments.setIamPolicy": type NetworkAttachmentsSetIamPolicyCall struct { @@ -192885,6 +193433,191 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } +// method id "compute.routers.getNatIpInfo": + +type RoutersGetNatIpInfoCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetNatIpInfo: Retrieves runtime NAT IP information. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. +// - router: Name of the Router resource to query for Nat IP +// information. The name should conform to RFC1035. +func (r *RoutersService) GetNatIpInfo(project string, region string, router string) *RoutersGetNatIpInfoCall { + c := &RoutersGetNatIpInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + return c +} + +// NatName sets the optional parameter "natName": Name of the nat +// service to filter the NAT IP information. If it is omitted, all nats +// for this router will be returned. Name should conform to RFC1035. 
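The NetworkAttachmentsPatchCall added above follows the usual generated-call shape. A compact usage sketch, with placeholder project, region, and attachment names and an arbitrarily chosen field to patch, might look like this.

```go
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// JSON merge-patch body: only the fields set here are updated. The
	// Description field is used purely for illustration.
	patch := &compute.NetworkAttachment{Description: "updated by example"}

	// compute.networkAttachments.patch; an optional RequestId(...) could be
	// chained here to make retries idempotent.
	op, err := svc.NetworkAttachments.
		Patch("my-project", "us-central1", "my-attachment", patch).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, op.Status)
}
```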
+func (c *RoutersGetNatIpInfoCall) NatName(natName string) *RoutersGetNatIpInfoCall { + c.urlParams_.Set("natName", natName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersGetNatIpInfoCall) Fields(s ...googleapi.Field) *RoutersGetNatIpInfoCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersGetNatIpInfoCall) IfNoneMatch(entityTag string) *RoutersGetNatIpInfoCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersGetNatIpInfoCall) Context(ctx context.Context) *RoutersGetNatIpInfoCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersGetNatIpInfoCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersGetNatIpInfoCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "router": c.router, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.getNatIpInfo" call. +// Exactly one of *NatIpInfoResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NatIpInfoResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersGetNatIpInfoCall) Do(opts ...googleapi.CallOption) (*NatIpInfoResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NatIpInfoResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves runtime NAT IP information.", + // "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + // "httpMethod": "GET", + // "id": "compute.routers.getNatIpInfo", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "natName": { + // "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + // "response": { + // "$ref": "NatIpInfoResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.routers.getNatMappingInfo": type RoutersGetNatMappingInfoCall struct { @@ -199872,6 +200605,331 @@ func (c *ServiceAttachmentsTestIamPermissionsCall) Do(opts ...googleapi.CallOpti } +// method id "compute.snapshotSettings.get": + +type SnapshotSettingsGetCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get snapshot settings. +// +// - project: Project ID for this request. +func (r *SnapshotSettingsService) Get(project string) *SnapshotSettingsGetCall { + c := &SnapshotSettingsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *SnapshotSettingsGetCall) Fields(s ...googleapi.Field) *SnapshotSettingsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *SnapshotSettingsGetCall) IfNoneMatch(entityTag string) *SnapshotSettingsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotSettingsGetCall) Context(ctx context.Context) *SnapshotSettingsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SnapshotSettingsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SnapshotSettingsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/snapshotSettings") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.snapshotSettings.get" call. +// Exactly one of *SnapshotSettings or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *SnapshotSettings.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *SnapshotSettingsGetCall) Do(opts ...googleapi.CallOption) (*SnapshotSettings, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &SnapshotSettings{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get snapshot settings.", + // "flatPath": "projects/{project}/global/snapshotSettings", + // "httpMethod": "GET", + // "id": "compute.snapshotSettings.get", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/snapshotSettings", + // "response": { + // "$ref": "SnapshotSettings" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.snapshotSettings.patch": + +type SnapshotSettingsPatchCall struct { + s *Service + project string + snapshotsettings *SnapshotSettings + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patch snapshot settings. +// +// - project: Project ID for this request. +func (r *SnapshotSettingsService) Patch(project string, snapshotsettings *SnapshotSettings) *SnapshotSettingsPatchCall { + c := &SnapshotSettingsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.snapshotsettings = snapshotsettings + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *SnapshotSettingsPatchCall) RequestId(requestId string) *SnapshotSettingsPatchCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// UpdateMask sets the optional parameter "updateMask": update_mask +// indicates fields to be updated as part of this request. +func (c *SnapshotSettingsPatchCall) UpdateMask(updateMask string) *SnapshotSettingsPatchCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *SnapshotSettingsPatchCall) Fields(s ...googleapi.Field) *SnapshotSettingsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *SnapshotSettingsPatchCall) Context(ctx context.Context) *SnapshotSettingsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *SnapshotSettingsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *SnapshotSettingsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshotsettings) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/snapshotSettings") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.snapshotSettings.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *SnapshotSettingsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patch snapshot settings.", + // "flatPath": "projects/{project}/global/snapshotSettings", + // "httpMethod": "PATCH", + // "id": "compute.snapshotSettings.patch", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "updateMask": { + // "description": "update_mask indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/snapshotSettings", + // "request": { + // "$ref": "SnapshotSettings" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.snapshots.delete": type SnapshotsDeleteCall struct { diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 8b77f64d42..1abba41e13 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -26592,6 +26592,53 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getNatIpInfo": { + "description": "Retrieves runtime NAT IP information.", + "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + "httpMethod": "GET", + "id": "compute.routers.getNatIpInfo", + "parameterOrder": [ + "project", + "region", + "router" + ], + "parameters": { + "natName": { + "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "router": { + "description": "Name of the Router resource to query for Nat IP information. 
The name should conform to RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + "response": { + "$ref": "NatIpInfoResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "getNatMappingInfo": { "description": "Retrieves runtime Nat mapping information of VM endpoints.", "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo", @@ -33788,9 +33835,32 @@ } } }, - "revision": "20230711", + "revision": "20230725", "rootUrl": "https://compute.googleapis.com/", "schemas": { + "AWSV4Signature": { + "description": "Contains the configurations necessary to generate a signature for access to private storage buckets that support Signature Version 4 for authentication. The service name for generating the authentication header will always default to 's3'.", + "id": "AWSV4Signature", + "properties": { + "accessKey": { + "description": "The access key used for s3 bucket authentication. Required for updating or creating a backend that uses AWS v4 signature authentication, but will not be returned as part of the configuration when queried with a REST API GET request. @InputOnly", + "type": "string" + }, + "accessKeyId": { + "description": "The identifier of an access key used for s3 bucket authentication.", + "type": "string" + }, + "accessKeyVersion": { + "description": "The optional version identifier for the access key. You can use this to keep track of different iterations of your access key.", + "type": "string" + }, + "originRegion": { + "description": "The name of the cloud region of your origin. This is a free-form field with the name of the region your cloud uses to host your origin. For example, \"us-east-1\" for AWS or \"us-ashburn-1\" for OCI.", + "type": "string" + } + }, + "type": "object" + }, "AcceleratorConfig": { "description": "A specification of the type and number of accelerator cards attached to the instance.", "id": "AcceleratorConfig", @@ -35334,7 +35404,7 @@ "type": "string" }, "replicaZones": { - "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", + "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone.", "items": { "type": "string" }, @@ -38034,6 +38104,10 @@ "description": "Per-instance properties to be set on individual instances. To be extended in the future.", "id": "BulkInsertInstanceResourcePerInstanceProperties", "properties": { + "hostname": { + "description": "Specifies the hostname of the instance. More details in: https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention", + "type": "string" + }, "name": { "description": "This field is only temporary. It will be removed. Do not use it.", "type": "string" @@ -38263,9 +38337,11 @@ "description": "The type of commitment, which affects the discount rate and the eligible resources. Type MEMORY_OPTIMIZED specifies a commitment that will only apply to memory optimized machines. 
Type ACCELERATOR_OPTIMIZED specifies a commitment that will only apply to accelerator optimized machines.", "enum": [ "ACCELERATOR_OPTIMIZED", + "ACCELERATOR_OPTIMIZED_A3", "COMPUTE_OPTIMIZED", "COMPUTE_OPTIMIZED_C2D", "COMPUTE_OPTIMIZED_C3", + "COMPUTE_OPTIMIZED_H3", "GENERAL_PURPOSE", "GENERAL_PURPOSE_E2", "GENERAL_PURPOSE_N2", @@ -38289,6 +38365,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -41965,7 +42043,7 @@ "type": "string" }, "noAutomateDnsZone": { - "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field.", + "description": "This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. Non-PSC forwarding rules do not use this field. Once set, this field is not mutable.", "type": "boolean" }, "portRange": { @@ -42039,7 +42117,7 @@ "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. ", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. The target is not mutable once set as a service attachment. ", "type": "string" } }, @@ -52529,6 +52607,72 @@ }, "type": "object" }, + "NatIpInfo": { + "description": "Contains NAT IP information of a NAT config (i.e. 
usage status, mode).", + "id": "NatIpInfo", + "properties": { + "natIpInfoMappings": { + "description": "A list of all NAT IPs assigned to this NAT config.", + "items": { + "$ref": "NatIpInfoNatIpInfoMapping" + }, + "type": "array" + }, + "natName": { + "description": "Name of the NAT config which the NAT IP belongs to.", + "type": "string" + } + }, + "type": "object" + }, + "NatIpInfoNatIpInfoMapping": { + "description": "Contains information of a NAT IP.", + "id": "NatIpInfoNatIpInfoMapping", + "properties": { + "mode": { + "description": "Specifies whether NAT IP is auto or manual.", + "enum": [ + "AUTO", + "MANUAL" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "natIp": { + "description": "NAT IP address. For example: 203.0.113.11.", + "type": "string" + }, + "usage": { + "description": "Specifies whether NAT IP is currently serving at least one endpoint or not.", + "enum": [ + "IN_USE", + "UNUSED" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "NatIpInfoResponse": { + "id": "NatIpInfoResponse", + "properties": { + "result": { + "description": "[Output Only] A list of NAT IP information.", + "items": { + "$ref": "NatIpInfo" + }, + "type": "array" + } + }, + "type": "object" + }, "Network": { "description": "Represents a VPC Network resource. Networks connect resources to each other and to the internet. For more information, read Virtual Private Cloud (VPC) Network.", "id": "Network", @@ -57084,7 +57228,7 @@ "type": "object" }, "Operation": { - "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/v1/globalOperations) * [Regional](/compute/docs/reference/rest/v1/regionOperations) * [Zonal](/compute/docs/reference/rest/v1/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zonalOperations` resource. For more information, read Global, Regional, and Zonal Resources.", + "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/v1/globalOperations) * [Regional](/compute/docs/reference/rest/v1/regionOperations) * [Zonal](/compute/docs/reference/rest/v1/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources.", "id": "Operation", "properties": { "clientOperationId": { @@ -57199,6 +57343,10 @@ "description": "[Output Only] Server-defined URL for the resource.", "type": "string" }, + "setCommonInstanceMetadataOperationMetadata": { + "$ref": "SetCommonInstanceMetadataOperationMetadata", + "description": "[Output Only] If the operation is for projects.setCommonInstanceMetadata, this field will contain information on all underlying zonal actions and their state." + }, "startTime": { "description": "[Output Only] The time that this operation was started by the server. 
This value is in RFC3339 text format.", "type": "string" @@ -57231,7 +57379,7 @@ "type": "string" }, "user": { - "description": "[Output Only] User who requested the operation, for example: `user@example.com`.", + "description": "[Output Only] User who requested the operation, for example: `user@example.com` or `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.", "type": "string" }, "warnings": { @@ -65870,6 +66018,40 @@ "" ], "type": "string" + }, + "thresholdConfigs": { + "description": "Configuration options for layer7 adaptive protection for various customizable thresholds.", + "items": { + "$ref": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig": { + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig", + "properties": { + "autoDeployConfidenceThreshold": { + "format": "float", + "type": "number" + }, + "autoDeployExpirationSec": { + "format": "int32", + "type": "integer" + }, + "autoDeployImpactedBaselineThreshold": { + "format": "float", + "type": "number" + }, + "autoDeployLoadThreshold": { + "format": "float", + "type": "number" + }, + "name": { + "description": "The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "type": "string" } }, "type": "object" @@ -66444,6 +66626,10 @@ "description": "The authentication and authorization settings for a BackendService.", "id": "SecuritySettings", "properties": { + "awsV4Authentication": { + "$ref": "AWSV4Signature", + "description": "The configuration needed to generate a signature for access to private storage buckets that support AWS's Signature Version 4 for authentication. Allowed only for INTERNET_IP_PORT and INTERNET_FQDN_PORT NEG backends." + }, "clientTlsPolicy": { "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", "type": "string" @@ -67147,6 +67333,53 @@ }, "type": "object" }, + "SetCommonInstanceMetadataOperationMetadata": { + "id": "SetCommonInstanceMetadataOperationMetadata", + "properties": { + "clientOperationId": { + "description": "[Output Only] The client operation id.", + "type": "string" + }, + "perLocationOperations": { + "additionalProperties": { + "$ref": "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo" + }, + "description": "[Output Only] Status information per location (location name is key). Example key: zones/us-central1-a", + "type": "object" + } + }, + "type": "object" + }, + "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo": { + "id": "SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo", + "properties": { + "error": { + "$ref": "Status", + "description": "[Output Only] If state is `ABANDONED` or `FAILED`, this field is populated." 
+ }, + "state": { + "description": "[Output Only] Status of the action, which can be one of the following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.", + "enum": [ + "ABANDONED", + "DONE", + "FAILED", + "PROPAGATED", + "PROPAGATING", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "Operation not tracked in this location e.g. zone is marked as DOWN.", + "Operation has completed successfully.", + "Operation is in an error state.", + "Operation is confirmed to be in the location.", + "Operation is not yet confirmed to have been created in the location.", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "ShareSettings": { "description": "The share setting for reservations and sole tenancy node groups.", "id": "ShareSettings", @@ -69046,6 +69279,33 @@ }, "type": "object" }, + "Status": { + "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "Status", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.", + "type": "string" + } + }, + "type": "object" + }, "Subnetwork": { "description": "Represents a Subnetwork resource. A subnetwork (also known as a subnet) is a logical partition of a Virtual Private Cloud network with one primary IP range and zero or more secondary IP ranges. For more information, read Virtual Private Cloud (VPC) Network.", "id": "Subnetwork", diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 811675da12..ccac09b69b 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -1265,6 +1265,55 @@ type ZonesService struct { s *Service } +// AWSV4Signature: Contains the configurations necessary to generate a +// signature for access to private storage buckets that support +// Signature Version 4 for authentication. The service name for +// generating the authentication header will always default to 's3'. +type AWSV4Signature struct { + // AccessKey: The access key used for s3 bucket authentication. Required + // for updating or creating a backend that uses AWS v4 signature + // authentication, but will not be returned as part of the configuration + // when queried with a REST API GET request. @InputOnly + AccessKey string `json:"accessKey,omitempty"` + + // AccessKeyId: The identifier of an access key used for s3 bucket + // authentication. 
+ AccessKeyId string `json:"accessKeyId,omitempty"` + + // AccessKeyVersion: The optional version identifier for the access key. + // You can use this to keep track of different iterations of your access + // key. + AccessKeyVersion string `json:"accessKeyVersion,omitempty"` + + // OriginRegion: The name of the cloud region of your origin. This is a + // free-form field with the name of the region your cloud uses to host + // your origin. For example, "us-east-1" for AWS or "us-ashburn-1" for + // OCI. + OriginRegion string `json:"originRegion,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AccessKey") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessKey") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AWSV4Signature) MarshalJSON() ([]byte, error) { + type NoMethod AWSV4Signature + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AcceleratorConfig: A specification of the type and number of // accelerator cards attached to the instance. type AcceleratorConfig struct { @@ -3331,8 +3380,7 @@ type AttachedDiskInitializeParams struct { // ReplicaZones: Required for each regional disk associated with the // instance. Specify the URLs of the zones where the disk should be // replicated to. You must provide exactly two replica zones, and one - // zone must be the same as the instance zone. You can't use this option - // with boot disks. + // zone must be the same as the instance zone. ReplicaZones []string `json:"replicaZones,omitempty"` // ResourceManagerTags: Resource manager tags to be bound to the disk. @@ -7581,11 +7629,15 @@ func (s *BulkInsertInstanceResource) MarshalJSON() ([]byte, error) { // properties to be set on individual instances. To be extended in the // future. type BulkInsertInstanceResourcePerInstanceProperties struct { + // Hostname: Specifies the hostname of the instance. More details in: + // https://cloud.google.com/compute/docs/instances/custom-hostname-vm#naming_convention + Hostname string `json:"hostname,omitempty"` + // Name: This field is only temporary. It will be removed. Do not use // it. Name string `json:"name,omitempty"` - // ForceSendFields is a list of field names (e.g. "Name") to + // ForceSendFields is a list of field names (e.g. "Hostname") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -7593,8 +7645,8 @@ type BulkInsertInstanceResourcePerInstanceProperties struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"Name") to include in API - // requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Hostname") to include in + // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -7879,9 +7931,11 @@ type Commitment struct { // // Possible values: // "ACCELERATOR_OPTIMIZED" + // "ACCELERATOR_OPTIMIZED_A3" // "COMPUTE_OPTIMIZED" // "COMPUTE_OPTIMIZED_C2D" // "COMPUTE_OPTIMIZED_C3" + // "COMPUTE_OPTIMIZED_H3" // "GENERAL_PURPOSE" // "GENERAL_PURPOSE_E2" // "GENERAL_PURPOSE_N2" @@ -13176,7 +13230,8 @@ type ForwardingRule struct { // NoAutomateDnsZone: This is used in PSC consumer ForwardingRule to // control whether it should try to auto-generate a DNS zone or not. - // Non-PSC forwarding rules do not use this field. + // Non-PSC forwarding rules do not use this field. Once set, this field + // is not mutable. NoAutomateDnsZone bool `json:"noAutomateDnsZone,omitempty"` // PortRange: This field can only be used: - If IPProtocol is one of @@ -13287,7 +13342,8 @@ type ForwardingRule struct { // vpc-sc - APIs that support VPC Service Controls. - all-apis - All // supported Google APIs. - For Private Service Connect forwarding rules // that forward traffic to managed services, the target must be a - // service attachment. + // service attachment. The target is not mutable once set as a service + // attachment. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -28394,6 +28450,113 @@ func (s *NamedPort) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// NatIpInfo: Contains NAT IP information of a NAT config (i.e. usage +// status, mode). +type NatIpInfo struct { + // NatIpInfoMappings: A list of all NAT IPs assigned to this NAT config. + NatIpInfoMappings []*NatIpInfoNatIpInfoMapping `json:"natIpInfoMappings,omitempty"` + + // NatName: Name of the NAT config which the NAT IP belongs to. + NatName string `json:"natName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NatIpInfoMappings") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NatIpInfoMappings") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfo) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// NatIpInfoNatIpInfoMapping: Contains information of a NAT IP. +type NatIpInfoNatIpInfoMapping struct { + // Mode: Specifies whether NAT IP is auto or manual. 
+ // + // Possible values: + // "AUTO" + // "MANUAL" + Mode string `json:"mode,omitempty"` + + // NatIp: NAT IP address. For example: 203.0.113.11. + NatIp string `json:"natIp,omitempty"` + + // Usage: Specifies whether NAT IP is currently serving at least one + // endpoint or not. + // + // Possible values: + // "IN_USE" + // "UNUSED" + Usage string `json:"usage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfoNatIpInfoMapping) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfoNatIpInfoMapping + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type NatIpInfoResponse struct { + // Result: [Output Only] A list of NAT IP information. + Result []*NatIpInfo `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NatIpInfoResponse) MarshalJSON() ([]byte, error) { + type NoMethod NatIpInfoResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Network: Represents a VPC Network resource. Networks connect // resources to each other and to the internet. For more information, // read Virtual Private Cloud (VPC) Network. @@ -34433,7 +34596,7 @@ func (s *NotificationEndpointListWarningData) MarshalJSON() ([]byte, error) { // regional or zonal. - For global operations, use the // `globalOperations` resource. - For regional operations, use the // `regionOperations` resource. - For zonal operations, use the -// `zonalOperations` resource. For more information, read Global, +// `zoneOperations` resource. For more information, read Global, // Regional, and Zonal Resources. 
type Operation struct { // ClientOperationId: [Output Only] The value of `requestId` if you @@ -34503,6 +34666,11 @@ type Operation struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SetCommonInstanceMetadataOperationMetadata: [Output Only] If the + // operation is for projects.setCommonInstanceMetadata, this field will + // contain information on all underlying zonal actions and their state. + SetCommonInstanceMetadataOperationMetadata *SetCommonInstanceMetadataOperationMetadata `json:"setCommonInstanceMetadataOperationMetadata,omitempty"` + // StartTime: [Output Only] The time that this operation was started by // the server. This value is in RFC3339 text format. StartTime string `json:"startTime,omitempty"` @@ -34530,7 +34698,8 @@ type Operation struct { TargetLink string `json:"targetLink,omitempty"` // User: [Output Only] User who requested the operation, for example: - // `user@example.com`. + // `user@example.com` or `alice_smith_identifier + // (global/workforcePools/example-com-us-employees)`. User string `json:"user,omitempty"` // Warnings: [Output Only] If warning messages are generated during @@ -46269,6 +46438,10 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { // "STANDARD" RuleVisibility string `json:"ruleVisibility,omitempty"` + // ThresholdConfigs: Configuration options for layer7 adaptive + // protection for various customizable thresholds. + ThresholdConfigs []*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig `json:"thresholdConfigs,omitempty"` + // ForceSendFields is a list of field names (e.g. "Enable") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -46292,6 +46465,62 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) MarshalJ return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig struct { + AutoDeployConfidenceThreshold float64 `json:"autoDeployConfidenceThreshold,omitempty"` + + AutoDeployExpirationSec int64 `json:"autoDeployExpirationSec,omitempty"` + + AutoDeployImpactedBaselineThreshold float64 `json:"autoDeployImpactedBaselineThreshold,omitempty"` + + AutoDeployLoadThreshold float64 `json:"autoDeployLoadThreshold,omitempty"` + + // Name: The name must be 1-63 characters long, and comply with RFC1035. + // The name must be unique within the security policy. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AutoDeployConfidenceThreshold") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. + // "AutoDeployConfidenceThreshold") to include in API requests with the + // JSON null value. By default, fields with empty values are omitted + // from API requests. However, any field with an empty value appearing + // in NullFields will be sent to the server as null. It is an error if a + // field in this list has a non-empty value. This may be used to include + // null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig) UnmarshalJSON(data []byte) error { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig + var s1 struct { + AutoDeployConfidenceThreshold gensupport.JSONFloat64 `json:"autoDeployConfidenceThreshold"` + AutoDeployImpactedBaselineThreshold gensupport.JSONFloat64 `json:"autoDeployImpactedBaselineThreshold"` + AutoDeployLoadThreshold gensupport.JSONFloat64 `json:"autoDeployLoadThreshold"` + *NoMethod + } + s1.NoMethod = (*NoMethod)(s) + if err := json.Unmarshal(data, &s1); err != nil { + return err + } + s.AutoDeployConfidenceThreshold = float64(s1.AutoDeployConfidenceThreshold) + s.AutoDeployImpactedBaselineThreshold = float64(s1.AutoDeployImpactedBaselineThreshold) + s.AutoDeployLoadThreshold = float64(s1.AutoDeployLoadThreshold) + return nil +} + type SecurityPolicyAdvancedOptionsConfig struct { // JsonCustomConfig: Custom configuration to apply the JSON parsing. // Only applicable when json_parsing is set to STANDARD. @@ -47235,6 +47464,12 @@ func (s *SecurityPolicyRuleRedirectOptions) MarshalJSON() ([]byte, error) { // SecuritySettings: The authentication and authorization settings for a // BackendService. type SecuritySettings struct { + // AwsV4Authentication: The configuration needed to generate a signature + // for access to private storage buckets that support AWS's Signature + // Version 4 for authentication. Allowed only for INTERNET_IP_PORT and + // INTERNET_FQDN_PORT NEG backends. + AwsV4Authentication *AWSV4Signature `json:"awsV4Authentication,omitempty"` + // ClientTlsPolicy: Optional. A URL referring to a // networksecurity.ClientTlsPolicy resource that describes how clients // should authenticate with this service's backends. clientTlsPolicy @@ -47258,15 +47493,15 @@ type SecuritySettings struct { // attached clientTlsPolicy with clientCertificate (mTLS mode). SubjectAltNames []string `json:"subjectAltNames,omitempty"` - // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "AwsV4Authentication") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ClientTlsPolicy") to + // NullFields is a list of field names (e.g. "AwsV4Authentication") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. 
However, any field // with an empty value appearing in NullFields will be sent to the @@ -48188,6 +48423,81 @@ func (s *ServiceAttachmentsScopedListWarningData) MarshalJSON() ([]byte, error) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SetCommonInstanceMetadataOperationMetadata struct { + // ClientOperationId: [Output Only] The client operation id. + ClientOperationId string `json:"clientOperationId,omitempty"` + + // PerLocationOperations: [Output Only] Status information per location + // (location name is key). Example key: zones/us-central1-a + PerLocationOperations map[string]SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo `json:"perLocationOperations,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ClientOperationId") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ClientOperationId") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SetCommonInstanceMetadataOperationMetadata) MarshalJSON() ([]byte, error) { + type NoMethod SetCommonInstanceMetadataOperationMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo struct { + // Error: [Output Only] If state is `ABANDONED` or `FAILED`, this field + // is populated. + Error *Status `json:"error,omitempty"` + + // State: [Output Only] Status of the action, which can be one of the + // following: `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or + // `DONE`. + // + // Possible values: + // "ABANDONED" - Operation not tracked in this location e.g. zone is + // marked as DOWN. + // "DONE" - Operation has completed successfully. + // "FAILED" - Operation is in an error state. + // "PROPAGATED" - Operation is confirmed to be in the location. + // "PROPAGATING" - Operation is not yet confirmed to have been created + // in the location. + // "UNSPECIFIED" + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Error") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Error") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo) MarshalJSON() ([]byte, error) { + type NoMethod SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ShareSettings: The share setting for reservations and sole tenancy // node groups. type ShareSettings struct { @@ -50729,6 +51039,50 @@ func (s *StatefulPolicyPreservedStateDiskDevice) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// Status: The `Status` type defines a logical error model that is +// suitable for different programming environments, including REST APIs +// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each +// `Status` message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the API Design Guide +// (https://cloud.google.com/apis/design/errors). +type Status struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. Any user-facing error message should be localized and sent + // in the google.rpc.Status.details field, or localized by the client. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Status) MarshalJSON() ([]byte, error) { + type NoMethod Status + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Subnetwork: Represents a Subnetwork resource. A subnetwork (also // known as a subnet) is a logical partition of a Virtual Private Cloud // network with one primary IP range and zero or more secondary IP @@ -171967,6 +172321,191 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } +// method id "compute.routers.getNatIpInfo": + +type RoutersGetNatIpInfoCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetNatIpInfo: Retrieves runtime NAT IP information. +// +// - project: Project ID for this request. +// - region: Name of the region for this request. 
+// - router: Name of the Router resource to query for Nat IP +// information. The name should conform to RFC1035. +func (r *RoutersService) GetNatIpInfo(project string, region string, router string) *RoutersGetNatIpInfoCall { + c := &RoutersGetNatIpInfoCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + return c +} + +// NatName sets the optional parameter "natName": Name of the nat +// service to filter the NAT IP information. If it is omitted, all nats +// for this router will be returned. Name should conform to RFC1035. +func (c *RoutersGetNatIpInfoCall) NatName(natName string) *RoutersGetNatIpInfoCall { + c.urlParams_.Set("natName", natName) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersGetNatIpInfoCall) Fields(s ...googleapi.Field) *RoutersGetNatIpInfoCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersGetNatIpInfoCall) IfNoneMatch(entityTag string) *RoutersGetNatIpInfoCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersGetNatIpInfoCall) Context(ctx context.Context) *RoutersGetNatIpInfoCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RoutersGetNatIpInfoCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RoutersGetNatIpInfoCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "router": c.router, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.getNatIpInfo" call. +// Exactly one of *NatIpInfoResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *NatIpInfoResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. 
+func (c *RoutersGetNatIpInfoCall) Do(opts ...googleapi.CallOption) (*NatIpInfoResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &NatIpInfoResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves runtime NAT IP information.", + // "flatPath": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + // "httpMethod": "GET", + // "id": "compute.routers.getNatIpInfo", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "natName": { + // "description": "Name of the nat service to filter the NAT IP information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query for Nat IP information. The name should conform to RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/routers/{router}/getNatIpInfo", + // "response": { + // "$ref": "NatIpInfoResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.routers.getNatMappingInfo": type RoutersGetNatMappingInfoCall struct { diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json index 94351fb29f..871d6e5e40 100644 --- a/vendor/google.golang.org/api/container/v1/container-api.json +++ b/vendor/google.golang.org/api/container/v1/container-api.json @@ -2540,7 +2540,7 @@ } } }, - "revision": "20230629", + "revision": "20230723", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2669,6 +2669,7 @@ }, "kubernetesDashboard": { "$ref": "KubernetesDashboard", + "deprecated": true, "description": "Configuration for the Kubernetes Dashboard. This addon is deprecated, and will be disabled in 1.15. It is recommended to use the Cloud Console to manage and monitor your Kubernetes clusters, workloads and applications. 
For more information, see: https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards" }, "networkPolicyConfig": { @@ -2839,6 +2840,7 @@ "description": "Specifies the node management options for NAP created node-pools." }, "minCpuPlatform": { + "deprecated": true, "description": "Deprecated. Minimum CPU platform to be used for NAP created node pools. The instance may be scheduled on the specified or newer CPU platform. Applicable values are the friendly names of CPU platforms, such as minCpuPlatform: Intel Haswell or minCpuPlatform: Intel Sandy Bridge. For more information, read [how to specify min CPU platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform). This field is deprecated, min_cpu_platform should be specified using `cloud.google.com/requested-min-cpu-platform` label selector on the pod. To unset the min cpu platform field pass \"automatic\" as field value.", "type": "string" }, @@ -2896,6 +2898,7 @@ "id": "BinaryAuthorization", "properties": { "enabled": { + "deprecated": true, "description": "This field is deprecated. Leave this unset and instead configure BinaryAuthorization using evaluation_mode. If evaluation_mode is set to anything other than EVALUATION_MODE_UNSPECIFIED, this field is ignored.", "type": "boolean" }, @@ -2994,14 +2997,17 @@ "type": "string" }, "operationId": { + "deprecated": true, "description": "Deprecated. The server-assigned `name` of the operation. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -3129,11 +3135,13 @@ "type": "string" }, "currentNodeCount": { + "deprecated": true, "description": "[Output only] The number of nodes currently in the cluster. Deprecated. Call Kubernetes API directly to retrieve node information.", "format": "int32", "type": "integer" }, "currentNodeVersion": { + "deprecated": true, "description": "[Output only] Deprecated, use [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools) instead. The current version of the node software components. If they are currently at multiple versions because they're in the process of being upgraded, this reflects the minimum version of all nodes.", "type": "string" }, @@ -3191,11 +3199,13 @@ "type": "string" }, "initialNodeCount": { + "deprecated": true, "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine [resource quota](https://cloud.google.com/compute/quotas) is sufficient for this number of instances. You must also have available firewall and routes quota. For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"node_config\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time. 
This field is deprecated, use node_pool.initial_node_count instead.", "format": "int32", "type": "integer" }, "instanceGroupUrls": { + "deprecated": true, "description": "Deprecated. Use node_pools.instance_group_urls.", "items": { "type": "string" @@ -3275,6 +3285,7 @@ }, "nodeConfig": { "$ref": "NodeConfig", + "deprecated": true, "description": "Parameters used in creating the cluster's nodes. For requests, this field should only be used in lieu of a \"node_pool\" object, since this configuration (along with the \"initial_node_count\") will be used to create a \"NodePool\" object with an auto-generated name. Do not use this and a node_pool at the same time. For responses, this field will be populated with the node configuration of the first node pool. (For configuration of each node pool, see `node_pool.config`) If unspecified, the defaults are used. This field is deprecated, use node_pool.config instead." }, "nodeIpv4CidrSize": { @@ -3359,6 +3370,7 @@ "type": "string" }, "statusMessage": { + "deprecated": true, "description": "[Output only] Deprecated. Use conditions instead. Additional information about the current status of this cluster, if available.", "type": "string" }, @@ -3379,6 +3391,7 @@ "description": "Configuration for the use of Kubernetes Service Accounts in GCP IAM policies." }, "zone": { + "deprecated": true, "description": "[Output only] The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field is deprecated, use location instead.", "type": "string" } @@ -3690,6 +3703,7 @@ "id": "CompleteIPRotationRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -3698,10 +3712,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -3771,10 +3787,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the parent field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "type": "string" } @@ -3786,6 +3804,7 @@ "id": "CreateNodePoolRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the parent field.", "type": "string" }, @@ -3798,10 +3817,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). 
This field has been deprecated and replaced by the parent field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the parent field.", "type": "string" } @@ -4245,6 +4266,7 @@ "readOnly": true }, "clusterIpv4Cidr": { + "deprecated": true, "description": "This field is deprecated, use cluster_ipv4_cidr_block.", "type": "string" }, @@ -4281,6 +4303,7 @@ "type": "string" }, "nodeIpv4Cidr": { + "deprecated": true, "description": "This field is deprecated, use node_ipv4_cidr_block.", "type": "string" }, @@ -4293,6 +4316,7 @@ "description": "[PRIVATE FIELD] Pod CIDR size overprovisioning config for the cluster. Pod CIDR size per node depends on max_pods_per_node. By default, the value of max_pods_per_node is doubled and then rounded off to next power of 2 to get the size of pod CIDR block per node. Example: max_pods_per_node of 30 would result in 64 IPs (/26). This config can disable the doubling of IPs (we still round off to next power of 2) Example: max_pods_per_node of 30 will result in 32 IPs (/27) when overprovisioning is disabled." }, "servicesIpv4Cidr": { + "deprecated": true, "description": "This field is deprecated, use services_ipv4_cidr_block.", "type": "string" }, @@ -4714,10 +4738,12 @@ "type": "string" }, "password": { + "deprecated": true, "description": "The password to use for HTTP basic authentication to the master endpoint. Because the master endpoint is open to the Internet, you should create a strong password. If a password is provided for cluster creation, username must be non-empty. Warning: basic authentication is deprecated, and will be removed in GKE control plane versions 1.19 and newer. For a list of recommended authentication methods, see: https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication", "type": "string" }, "username": { + "deprecated": true, "description": "The username to use for HTTP basic authentication to the master endpoint. For clusters v1.6.0 and later, basic authentication can be disabled by leaving username unspecified (or setting it to the empty string). Warning: basic authentication is deprecated, and will be removed in GKE control plane versions 1.19 and newer. For a list of recommended authentication methods, see: https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication", "type": "string" } @@ -4806,14 +4832,26 @@ "SYSTEM_COMPONENTS", "APISERVER", "SCHEDULER", - "CONTROLLER_MANAGER" + "CONTROLLER_MANAGER", + "STORAGE", + "HPA", + "POD", + "DAEMONSET", + "DEPLOYMENT", + "STATEFULSET" ], "enumDescriptions": [ "Default value. This shouldn't be used.", "system components", "kube-apiserver", "kube-scheduler", - "kube-controller-manager" + "kube-controller-manager", + "Storage", + "Horizontal Pod Autoscaling", + "Pod", + "DaemonSet", + "Deployment", + "Statefulset" ], "type": "string" }, @@ -5412,6 +5450,7 @@ "type": "string" }, "statusMessage": { + "deprecated": true, "description": "[Output only] Deprecated. Use conditions instead. Additional information about the current status of this node pool instance, if available.", "type": "string" }, @@ -5574,6 +5613,7 @@ "id": "Operation", "properties": { "clusterConditions": { + "deprecated": true, "description": "Which conditions caused the current cluster state. Deprecated. 
Use field error instead.", "items": { "$ref": "StatusCondition" @@ -5601,6 +5641,7 @@ "type": "string" }, "nodepoolConditions": { + "deprecated": true, "description": "Which conditions caused the current node pool state. Deprecated. Use field error instead.", "items": { "$ref": "StatusCondition" @@ -5629,6 +5670,26 @@ "SET_MAINTENANCE_POLICY", "RESIZE_CLUSTER" ], + "enumDeprecated": [ + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + false, + true, + true, + true, + false, + true, + true, + false + ], "enumDescriptions": [ "Not set.", "The cluster is being created. The cluster should be assumed to be unusable until the operation finishes. In the event of the operation failing, the cluster will enter the ERROR state and eventually be deleted.", @@ -5683,6 +5744,7 @@ "type": "string" }, "statusMessage": { + "deprecated": true, "description": "Output only. If an error has occurred, a textual description of the error. Deprecated. Use the field error instead.", "readOnly": true, "type": "string" @@ -5692,6 +5754,7 @@ "type": "string" }, "zone": { + "deprecated": true, "description": "The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation is taking place. This field is deprecated, use location instead.", "type": "string" } @@ -6027,6 +6090,7 @@ "id": "RollbackNodePoolUpgradeRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to rollback. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6035,10 +6099,12 @@ "type": "string" }, "nodePoolId": { + "deprecated": true, "description": "Deprecated. The name of the node pool to rollback. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6047,6 +6113,7 @@ "type": "boolean" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6226,6 +6293,7 @@ "description": "Required. The desired configurations for the various addons available to run in the cluster." }, "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6234,10 +6302,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6249,6 +6319,7 @@ "id": "SetLabelsRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster. 
This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6261,6 +6332,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6272,6 +6344,7 @@ "type": "object" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6283,6 +6356,7 @@ "id": "SetLegacyAbacRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6295,10 +6369,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6310,6 +6386,7 @@ "id": "SetLocationsRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6325,10 +6402,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6340,6 +6419,7 @@ "id": "SetLoggingServiceRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6352,10 +6432,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6410,6 +6492,7 @@ "type": "string" }, "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. 
This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6418,6 +6501,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6426,6 +6510,7 @@ "description": "Required. A description of the update." }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6437,6 +6522,7 @@ "id": "SetMonitoringServiceRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6449,10 +6535,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6464,6 +6552,7 @@ "id": "SetNetworkPolicyRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6476,10 +6565,12 @@ "description": "Required. Configuration options for the NetworkPolicy feature." }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6495,6 +6586,7 @@ "description": "Required. Autoscaling configuration for the node pool." }, "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6503,14 +6595,17 @@ "type": "string" }, "nodePoolId": { + "deprecated": true, "description": "Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6522,6 +6617,7 @@ "id": "SetNodePoolManagementRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6534,14 +6630,17 @@ "type": "string" }, "nodePoolId": { + "deprecated": true, "description": "Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6553,6 +6652,7 @@ "id": "SetNodePoolSizeRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to update. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6566,14 +6666,17 @@ "type": "integer" }, "nodePoolId": { + "deprecated": true, "description": "Deprecated. The name of the node pool to update. This field has been deprecated and replaced by the name field.", "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6647,6 +6750,7 @@ "id": "StartIPRotationRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6655,6 +6759,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6663,6 +6768,7 @@ "type": "boolean" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6743,6 +6849,7 @@ "type": "string" }, "code": { + "deprecated": true, "description": "Machine-friendly representation of the condition Deprecated. Use canonical_code instead.", "enum": [ "UNKNOWN", @@ -6797,6 +6904,7 @@ "id": "UpdateClusterRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. 
This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6805,6 +6913,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6813,6 +6922,7 @@ "description": "Required. A description of the update." }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6835,6 +6945,7 @@ "id": "UpdateMasterRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6847,10 +6958,12 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. This field has been deprecated and replaced by the name field.", "type": "string" } @@ -6862,6 +6975,7 @@ "id": "UpdateNodePoolRequest", "properties": { "clusterId": { + "deprecated": true, "description": "Deprecated. The name of the cluster to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6921,6 +7035,7 @@ "description": "Node network config." }, "nodePoolId": { + "deprecated": true, "description": "Deprecated. The name of the node pool to upgrade. This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6929,6 +7044,7 @@ "type": "string" }, "projectId": { + "deprecated": true, "description": "Deprecated. The Google Developers Console [project ID or project number](https://cloud.google.com/resource-manager/docs/creating-managing-projects). This field has been deprecated and replaced by the name field.", "type": "string" }, @@ -6957,6 +7073,7 @@ "description": "The desired workload metadata config for the node pool." }, "zone": { + "deprecated": true, "description": "Deprecated. The name of the Google Compute Engine [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster resides. 
This field has been deprecated and replaced by the name field.", "type": "string" } diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go index a561fd62c4..388ecf1d50 100644 --- a/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -4166,6 +4166,12 @@ type MonitoringComponentConfig struct { // "APISERVER" - kube-apiserver // "SCHEDULER" - kube-scheduler // "CONTROLLER_MANAGER" - kube-controller-manager + // "STORAGE" - Storage + // "HPA" - Horizontal Pod Autoscaling + // "POD" - Pod + // "DAEMONSET" - DaemonSet + // "DEPLOYMENT" - Deployment + // "STATEFULSET" - Statefulset EnableComponents []string `json:"enableComponents,omitempty"` // ForceSendFields is a list of field names (e.g. "EnableComponents") to diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json index d3667c41fc..d46f0d718e 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -1824,7 +1824,7 @@ } } }, - "revision": "20230721", + "revision": "20230801", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { @@ -3133,11 +3133,13 @@ "description": "The type of Load Balancer specified by this target. Must match the configuration of the Load Balancer located at the LoadBalancerTarget's IP address/port and region.", "enum": [ "none", + "globalL7ilb", "regionalL4ilb", "regionalL7ilb" ], "enumDescriptions": [ "", + "Cross-region internal Application Load Balancer", "Regional internal passthrough Network Load Balancer", "Regional internal Application Load Balancer" ], diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index edc8b0f729..ce62a19141 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -2510,6 +2510,7 @@ type RRSetRoutingPolicyLoadBalancerTarget struct { // // Possible values: // "none" + // "globalL7ilb" - Cross-region internal Application Load Balancer // "regionalL4ilb" - Regional internal passthrough Network Load // Balancer // "regionalL7ilb" - Regional internal Application Load Balancer diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go index cecbb9ba11..6923d3a716 100644 --- a/vendor/google.golang.org/api/internal/cba.go +++ b/vendor/google.golang.org/api/internal/cba.go @@ -91,16 +91,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { s2aMTLSEndpoint: "", } - // Check the env to determine whether to use S2A. - if !isGoogleS2AEnabled() { + if !shouldUseS2A(clientCertSource, settings) { return &defaultTransportConfig, nil } - // If client cert is found, use that over S2A. - // If MTLS is not enabled for the endpoint, skip S2A. - if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { - return &defaultTransportConfig, nil - } s2aMTLSEndpoint := settings.DefaultMTLSEndpoint // If there is endpoint override, honor it. if settings.Endpoint != "" { @@ -118,10 +112,6 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { }, nil } -func isGoogleS2AEnabled() bool { - return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" -} - // getClientCertificateSource returns a default client certificate source, if // not provided by the user. 
// @@ -275,8 +265,36 @@ func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, fun return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil } +func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set, skip S2A. + if settings.DefaultMTLSEndpoint == "" { + return false + } + // If MTLS is not enabled for this endpoint, skip S2A. + if !mtlsEndpointEnabledForS2A() { + return false + } + // If custom HTTP client is provided, skip S2A. + if settings.HTTPClient != nil { + return false + } + return true +} + // mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. var mtlsEndpointEnabledForS2A = func() bool { // TODO(xmenxk): determine this via discovery config. return true } + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 6fdda3b791..1a2b858c84 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.134.0" +const Version = "0.136.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 238f14617a..a070fc30cb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go/compute v1.20.1 +# cloud.google.com/go/compute v1.23.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 @@ -94,7 +94,7 @@ github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter -# github.com/aws/aws-sdk-go v1.44.311 +# github.com/aws/aws-sdk-go v1.44.322 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -251,7 +251,7 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/digitalocean/godo v1.100.0 +# github.com/digitalocean/godo v1.101.0 ## explicit; go 1.18 github.com/digitalocean/godo github.com/digitalocean/godo/metrics @@ -539,6 +539,9 @@ github.com/gregjones/httpcache # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap +# github.com/hashicorp/go-cleanhttp v0.5.2 +## explicit; go 1.13 +github.com/hashicorp/go-cleanhttp # github.com/hashicorp/go-immutable-radix v1.3.1 ## explicit github.com/hashicorp/go-immutable-radix @@ -548,6 +551,9 @@ github.com/hashicorp/go-msgpack/codec # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror +# github.com/hashicorp/go-retryablehttp v0.7.4 +## explicit; go 1.13 +github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-sockaddr v1.0.2 ## explicit github.com/hashicorp/go-sockaddr @@ -569,7 +575,7 @@ github.com/hashicorp/hcl/json/token # github.com/hashicorp/memberlist v0.3.1 ## explicit; go 1.12 github.com/hashicorp/memberlist -# github.com/hetznercloud/hcloud-go v1.48.0 +# github.com/hetznercloud/hcloud-go v1.49.0 
## explicit; go 1.19 github.com/hetznercloud/hcloud-go/hcloud github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation @@ -733,7 +739,7 @@ github.com/peterbourgon/diskv # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/pkg/sftp v1.13.5 +# github.com/pkg/sftp v1.13.6 ## explicit; go 1.15 github.com/pkg/sftp github.com/pkg/sftp/internal/encoding/ssh/filexfer @@ -794,7 +800,7 @@ github.com/sergi/go-diff/diffmatchpatch # github.com/shopspring/decimal v1.3.1 ## explicit; go 1.13 github.com/shopspring/decimal -# github.com/sirupsen/logrus v1.9.0 +# github.com/sirupsen/logrus v1.9.1 ## explicit; go 1.13 github.com/sirupsen/logrus # github.com/spf13/afero v1.9.5 @@ -910,7 +916,7 @@ go.starlark.net/syntax # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# golang.org/x/crypto v0.11.0 +# golang.org/x/crypto v0.12.0 ## explicit; go 1.17 golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b @@ -935,7 +941,7 @@ golang.org/x/crypto/scrypt golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 +# golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb ## explicit; go 1.20 golang.org/x/exp/constraints # golang.org/x/mod v0.11.0 @@ -943,7 +949,7 @@ golang.org/x/exp/constraints golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.12.0 +# golang.org/x/net v0.14.0 ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context @@ -960,8 +966,8 @@ golang.org/x/net/ipv4 golang.org/x/net/ipv6 golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.10.0 -## explicit; go 1.17 +# golang.org/x/oauth2 v0.11.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/google @@ -973,7 +979,7 @@ golang.org/x/oauth2/jwt ## explicit; go 1.17 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.10.0 +# golang.org/x/sys v0.11.0 ## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -983,10 +989,10 @@ golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc/eventlog -# golang.org/x/term v0.10.0 +# golang.org/x/term v0.11.0 ## explicit; go 1.17 golang.org/x/term -# golang.org/x/text v0.11.0 +# golang.org/x/text v0.12.0 ## explicit; go 1.17 golang.org/x/text/encoding golang.org/x/text/encoding/internal @@ -1026,7 +1032,7 @@ golang.org/x/tools/internal/typesinternal # gomodules.xyz/jsonpatch/v2 v2.3.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/api v0.134.0 +# google.golang.org/api v0.136.0 ## explicit; go 1.19 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v0.alpha @@ -1063,7 +1069,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230720185612-659f7aaaa771 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails @@ -1178,7 +1184,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.12.2 +# helm.sh/helm/v3 v3.12.3 ## explicit; go 1.19 helm.sh/helm/v3/internal/ignore helm.sh/helm/v3/internal/sympath @@ -1252,7 +1258,7 @@ k8s.io/api/scheduling/v1beta1 
k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.27.2 +# k8s.io/apiextensions-apiserver v0.27.4 ## explicit; go 1.20 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -1646,7 +1652,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.27.1 +# k8s.io/cloud-provider v0.27.4 ## explicit; go 1.20 k8s.io/cloud-provider k8s.io/cloud-provider/node/helpers @@ -1679,7 +1685,7 @@ k8s.io/component-base/version # k8s.io/component-helpers v0.27.4 ## explicit; go 1.20 k8s.io/component-helpers/node/util -# k8s.io/csi-translation-lib v0.27.0 +# k8s.io/csi-translation-lib v0.27.4 ## explicit; go 1.20 k8s.io/csi-translation-lib/plugins # k8s.io/gengo v0.0.0-20230306165830-ab3349d207d4 @@ -1768,7 +1774,7 @@ oras.land/oras-go/pkg/registry/remote/auth oras.land/oras-go/pkg/registry/remote/internal/errutil oras.land/oras-go/pkg/registry/remote/internal/syncutil oras.land/oras-go/pkg/target -# sigs.k8s.io/controller-runtime v0.15.0 +# sigs.k8s.io/controller-runtime v0.15.1 ## explicit; go 1.20 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index f01de43810..7600387047 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -236,6 +236,11 @@ func New(config *rest.Config, opts Options) (Cache, error) { } func defaultOpts(config *rest.Config, opts Options) (Options, error) { + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + logger := log.WithName("setup") // Use the rest HTTP client for the provided config if unset diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index f14f8a9f59..e0ff72dc13 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -152,6 +152,12 @@ func (m *mapper) getMapper() meta.RESTMapper { // addKnownGroupAndReload reloads the mapper with updated information about missing API group. // versions can be specified for partial updates, for instance for v1beta1 version only. func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { + // versions will here be [""] if the forwarded Version value of + // GroupVersionResource (in calling method) was not specified. + if len(versions) == 1 && versions[0] == "" { + versions = nil + } + // If no specific versions are set by user, we will scan all available ones for the API group. // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls // this data will be taken from cache. 
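// Illustrative sketch (assumes the controller-runtime v0.15.1 restmapper fix
// above, not part of the vendored diff): addKnownGroupAndReload now treats a
// single empty version as "unspecified", so a RESTMapping lookup that omits
// the version scans all served versions of the group rather than querying
// discovery for a literal "" version. The package name, function name and
// GroupKind below are hypothetical.
package restmapperexample

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// deploymentMapping resolves a mapping without pinning a version; with the fix
// above, the lazy mapper falls back to every served version of the group.
func deploymentMapping(mapper meta.RESTMapper) (*meta.RESTMapping, error) {
	return mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
}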
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 21067b6f8f..0d8b9fbe18 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -110,6 +110,11 @@ func newClient(config *rest.Config, options Options) (*client, error) { return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") } + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + if !options.WarningHandler.SuppressWarnings { // surface warnings logger := log.Log.WithName("KubeAPIWarningLogger") @@ -117,7 +122,6 @@ func newClient(config *rest.Config, options Options) (*client, error) { // is log.KubeAPIWarningLogger with deduplication enabled. // See log.KubeAPIWarningLoggerOptions for considerations // regarding deduplication. - config = rest.CopyConfig(config) config.WarningHandler = log.NewKubeAPIWarningLogger( logger, log.KubeAPIWarningLoggerOptions{ @@ -160,7 +164,7 @@ func newClient(config *rest.Config, options Options) (*client, error) { unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), } - rawMetaClient, err := metadata.NewForConfigAndClient(config, options.HTTPClient) + rawMetaClient, err := metadata.NewForConfigAndClient(metadata.ConfigFor(config), options.HTTPClient) if err != nil { return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 50a461f1cc..d81bf25de9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -513,8 +513,15 @@ type MatchingLabels map[string]string // ApplyToList applies this configuration to the given list options. func (m MatchingLabels) ApplyToList(opts *ListOptions) { // TODO(directxman12): can we avoid reserializing this over and over? - sel := labels.SelectorFromValidatedSet(map[string]string(m)) - opts.LabelSelector = sel + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // If there's already a selector, we need to AND the two together. + noValidSel := labels.SelectorFromValidatedSet(map[string]string(m)) + reqs, _ := noValidSel.Requirements() + for _, req := range reqs { + opts.LabelSelector = opts.LabelSelector.Add(req) + } } // ApplyToDeleteAllOf applies this configuration to the given an List options. @@ -528,14 +535,17 @@ type HasLabels []string // ApplyToList applies this configuration to the given list options. func (m HasLabels) ApplyToList(opts *ListOptions) { - sel := labels.NewSelector() + if opts.LabelSelector == nil { + opts.LabelSelector = labels.NewSelector() + } + // TODO: ignore invalid labels will result in an empty selector. + // This is inconsistent to the that of MatchingLabels. for _, label := range m { r, err := labels.NewRequirement(label, selection.Exists, nil) if err == nil { - sel = sel.Add(*r) + opts.LabelSelector = opts.LabelSelector.Add(*r) } } - opts.LabelSelector = sel } // ApplyToDeleteAllOf applies this configuration to the given an List options. 
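// Illustrative sketch (assumes the controller-runtime v0.15.1 options change
// above, not part of the vendored diff): MatchingLabels and HasLabels now
// append their requirements to any label selector already present on
// ListOptions, so stacking them in one List call ANDs the criteria instead of
// the last option overwriting the earlier ones. The package name, namespace,
// label keys and values below are hypothetical.
package listexample

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listLabelledPods lists pods in "default" whose labels match app=web and that
// also carry a "tier" label; the effective selector is "app=web,tier".
func listLabelledPods(ctx context.Context, c client.Client) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	err := c.List(ctx, pods,
		client.InNamespace("default"),
		client.MatchingLabels{"app": "web"},
		client.HasLabels{"tier"},
	)
	return pods, err
}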
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index b8d4146c9f..0d96951780 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -224,11 +224,11 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ... func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { if _, ok := obj.(runtime.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", subResource) + return fmt.Errorf("unstructured client did not understand object: %T", obj) } if _, ok := subResourceObj.(runtime.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", obj) + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } if subResourceObj.GetName() == "" { @@ -255,11 +255,11 @@ func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResour func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { if _, ok := obj.(runtime.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) + return fmt.Errorf("unstructured client did not understand object: %T", obj) } if _, ok := subResourceObj.(runtime.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", obj) + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } if subResourceObj.GetName() == "" { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go index 7d00c3c4b0..7ab76555b3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -179,6 +179,13 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { return nil, errors.New("must specify Config") } + originalConfig := config + + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + options := Options{} for _, opt := range opts { opt(&options) @@ -275,7 +282,7 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { } return &cluster{ - config: config, + config: originalConfig, httpClient: options.HTTPClient, scheme: options.Scheme, cache: cache, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 7e65ef0c3a..72a4a7801a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -19,6 +19,7 @@ package manager import ( "context" "crypto/tls" + "errors" "fmt" "net" "net/http" @@ -391,6 +392,9 @@ type LeaderElectionRunnable interface { // New returns a new Manager for creating Controllers. 
func New(config *rest.Config, options Options) (Manager, error) { + if config == nil { + return nil, errors.New("must specify Config") + } // Set default values for options fields options = setOptionsDefaults(options) @@ -412,6 +416,11 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } + config = rest.CopyConfig(config) + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here.
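// Illustrative sketch (assumes the controller-runtime v0.15.1 changes above,
// not part of the vendored diff): the cache, client, cluster and manager
// constructors now copy the incoming rest.Config and fill in
// rest.DefaultKubernetesUserAgent() when UserAgent is empty, leaving the
// caller's config unmodified, and manager.New now rejects a nil config
// outright. Setting UserAgent before constructing the manager keeps a custom
// agent string; the package name and agent string below are hypothetical.
package managerexample

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

// newManagerWithAgent builds a manager whose API requests carry a custom
// User-Agent; leaving cfg.UserAgent empty would use the Kubernetes default.
func newManagerWithAgent() (ctrl.Manager, error) {
	cfg := ctrl.GetConfigOrDie()
	cfg.UserAgent = "example-controller/v0.0.1"
	return ctrl.NewManager(cfg, ctrl.Options{})
}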