diff --git a/go.mod b/go.mod index 208607169c..adb1cb49e1 100644 --- a/go.mod +++ b/go.mod @@ -14,30 +14,30 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 github.com/apparentlymart/go-cidr v1.1.0 github.com/aws/amazon-ec2-instance-selector/v2 v2.4.1 - github.com/aws/aws-sdk-go v1.48.15 + github.com/aws/aws-sdk-go v1.49.13 github.com/blang/semver/v4 v4.0.0 - github.com/cert-manager/cert-manager v1.13.2 + github.com/cert-manager/cert-manager v1.13.3 github.com/digitalocean/godo v1.107.0 github.com/go-ini/ini v1.67.0 - github.com/go-logr/logr v1.3.0 + github.com/go-logr/logr v1.4.1 github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.6.0 github.com/google/go-containerregistry v0.17.0 github.com/google/go-tpm v0.9.0 github.com/google/go-tpm-tools v0.4.2 - github.com/google/uuid v1.4.0 + github.com/google/uuid v1.5.0 github.com/gophercloud/gophercloud v1.8.0 - github.com/hetznercloud/hcloud-go v1.52.0 + github.com/hetznercloud/hcloud-go v1.53.0 github.com/jacksontj/memberlistmesh v0.0.0-20190905163944-93462b9d2bb7 github.com/mitchellh/mapstructure v1.5.0 github.com/pelletier/go-toml v1.9.5 github.com/pkg/sftp v1.13.6 - github.com/prometheus/client_golang v1.17.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 + github.com/prometheus/client_golang v1.18.0 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22 github.com/sergi/go-diff v1.3.1 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.18.0 + github.com/spf13/viper v1.18.2 github.com/spotinst/spotinst-sdk-go v1.171.0 github.com/stretchr/testify v1.8.4 github.com/weaveworks/mesh v0.0.0-20191105120815-58dbcc3e8e63 @@ -48,24 +48,24 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 go.opentelemetry.io/proto/otlp v1.0.0 go.uber.org/multierr v1.11.0 - golang.org/x/crypto v0.16.0 - golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb + golang.org/x/crypto v0.17.0 + golang.org/x/exp v0.0.0-20231226003508-02704c960a9b golang.org/x/net v0.19.0 golang.org/x/oauth2 v0.15.0 golang.org/x/sync v0.5.0 golang.org/x/sys v0.15.0 - google.golang.org/api v0.153.0 - google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 + google.golang.org/api v0.154.0 + google.golang.org/grpc v1.60.1 + google.golang.org/protobuf v1.32.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/inf.v0 v0.9.1 gopkg.in/square/go-jose.v2 v2.6.0 - helm.sh/helm/v3 v3.13.2 + helm.sh/helm/v3 v3.13.3 k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 k8s.io/cli-runtime v0.29.0 k8s.io/client-go v0.29.0 - k8s.io/cloud-provider-aws v1.28.3 + k8s.io/cloud-provider-aws v1.29.1 k8s.io/cloud-provider-gcp/providers v0.28.2 k8s.io/component-base v0.29.0 k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 @@ -171,7 +171,7 @@ require ( github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/miekg/dns v1.1.55 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -201,9 +201,9 @@ require ( github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + 
github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -227,17 +227,17 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.16.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.28.3 // indirect - k8s.io/cloud-provider v0.28.3 // indirect + k8s.io/apiextensions-apiserver v0.28.4 // indirect + k8s.io/cloud-provider v0.29.0 // indirect k8s.io/component-helpers v0.29.0 // indirect - k8s.io/csi-translation-lib v0.28.3 // indirect + k8s.io/csi-translation-lib v0.29.0 // indirect k8s.io/klog v1.0.0 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect oras.land/oras-go v1.2.4 // indirect diff --git a/go.sum b/go.sum index 31a636b968..ab3978ff43 100644 --- a/go.sum +++ b/go.sum @@ -78,8 +78,8 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/amazon-ec2-instance-selector/v2 v2.4.1 h1:DmxtwV+pkakkVRhxKcAgnLbxCxvT7k8DBG271dfKPZ8= github.com/aws/amazon-ec2-instance-selector/v2 v2.4.1/go.mod h1:AEJrtkLkCkfIBIazidrVrgZqaXl+9dxI/wRgjdw+7G0= -github.com/aws/aws-sdk-go v1.48.15 h1:Gad2C4pLzuZDd5CA0Rvkfko6qUDDTOYru145gkO7w/Y= -github.com/aws/aws-sdk-go v1.48.15/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.13 h1:f4mGztsgnx2dR9r8FQYa9YW/RsKb+N7bgef4UGrOW1Y= +github.com/aws/aws-sdk-go v1.49.13/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -96,8 +96,8 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cert-manager/cert-manager v1.13.2 h1:LG8+OLvxtc49CSyfjW7zHSyvlt7JVaHgRGyhfdvPpkk= -github.com/cert-manager/cert-manager v1.13.2/go.mod h1:AdfSU8muS+bj3C46YaD1VrlpXh672z5MeW/k1k5Sl1w= +github.com/cert-manager/cert-manager v1.13.3 h1:3R4G0RI7K0OkTZhWlVOC5SGZMYa2NwqmQJoyKydrz/M= +github.com/cert-manager/cert-manager v1.13.3/go.mod h1:BM2+Pt/NmSv1Zr25/MHv6BgIEF9IUxA1xAjp80qkxgc= github.com/cespare/xxhash/v2 v2.1.1/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -200,8 +200,9 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= @@ -241,6 +242,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= @@ -296,8 +298,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= @@ -348,8 +350,8 @@ github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06A github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hetznercloud/hcloud-go v1.52.0 h1:3r9pEulTOBB9BoArSgpQYUQVTy+Xjkg0k/QAU4c6dQ8= -github.com/hetznercloud/hcloud-go v1.52.0/go.mod h1:VzDWThl47lOnZXY0q5/LPFD+M62pfe/52TV+mOrpp9Q= +github.com/hetznercloud/hcloud-go v1.53.0 h1:xThhlJc6MdpvDAqVB7bAw+nAQuCpQMwsf3yanCis4rM= +github.com/hetznercloud/hcloud-go 
v1.53.0/go.mod h1:VzDWThl47lOnZXY0q5/LPFD+M62pfe/52TV+mOrpp9Q= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -414,8 +416,8 @@ github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRC github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= @@ -510,27 +512,27 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.44.0 
h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -545,8 +547,8 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sahilm/fuzzy v0.1.0 h1:FzWGaw2Opqyu+794ZQ9SYifWv2EIXpwP4q8dY1kDAwI= github.com/sahilm/fuzzy v0.1.0/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21 h1:yWfiTPwYxB0l5fGMhl/G+liULugVIHD9AU77iNLrURQ= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22 h1:wJrcTdddKOI8TFxs8cemnhKP2EmKy3yfUKHj3ZdfzYo= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= @@ -570,8 +572,8 @@ github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.0 h1:pN6W1ub/G4OfnM+NR9p7xP9R6TltLUzp5JG9yZD3Qg0= -github.com/spf13/viper v1.18.0/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/spotinst/spotinst-sdk-go v1.171.0 h1:ZihMPEjkpIkSpawWLJt9RtCRY4mOQMGlfrkVmA03000= github.com/spotinst/spotinst-sdk-go v1.171.0/go.mod h1:Ku9c4p+kRWnQqmXkzGcTMHLcQKgLHrQZISxeKY7mPqE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -649,11 +651,11 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod 
h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4= +golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -670,7 +672,6 @@ golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -745,6 +746,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= @@ -770,28 +772,28 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= -google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= +google.golang.org/api v0.154.0 
h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050= +google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -803,8 +805,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -833,32 +835,32 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.13.2 h1:IcO9NgmmpetJODLZhR3f3q+6zzyXVKlRizKFwbi7K8w= -helm.sh/helm/v3 v3.13.2/go.mod h1:GIHDwZggaTGbedevTlrQ6DB++LBN6yuQdeGj0HNaDx0= +helm.sh/helm/v3 v3.13.3 h1:0zPEdGqHcubehJHP9emCtzRmu8oYsJFRrlVF3TFj8xY= +helm.sh/helm/v3 v3.13.3/go.mod h1:3OKO33yI3p4YEXtTITN2+4oScsHeQe71KuzhlZ+aPfg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= -k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= +k8s.io/apiextensions-apiserver v0.28.4 h1:AZpKY/7wQ8n+ZYDtNHbAJBb+N4AXXJvyZx6ww6yAJvU= +k8s.io/apiextensions-apiserver v0.28.4/go.mod h1:pgQIZ1U8eJSMQcENew/0ShUTlePcSGFq6dxSxf2mwPM= k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= k8s.io/cli-runtime v0.29.0 h1:q2kC3cex4rOBLfPOnMSzV2BIrrQlx97gxHJs21KxKS4= k8s.io/cli-runtime v0.29.0/go.mod h1:VKudXp3X7wR45L+nER85YUzOQIru28HQpXr0mTdeCrk= k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/cloud-provider v0.28.3 h1:9u+JjA3zIn0nqLOOa8tWnprFkffguSAhfBvo8p7LhBQ= -k8s.io/cloud-provider v0.28.3/go.mod h1:shAJxdrKu+SwwGUhkodxByPjaH8KBFZqXo6jU1F0ehI= -k8s.io/cloud-provider-aws v1.28.3 h1:JcgtHCxO3rMV3OzncW/UL3iPRgHAPnklkz6OG+DJJV4= -k8s.io/cloud-provider-aws v1.28.3/go.mod h1:Au90yabJ9C0GxpP/U3XzoP3RmkJOyXzNgopJaxwIBWo= +k8s.io/cloud-provider v0.29.0 h1:Qgk/jHsSKGRk/ltTlN6e7eaNuuamLROOzVBd0RPp94M= +k8s.io/cloud-provider v0.29.0/go.mod h1:gBCt7YYKFV4oUcJ/0xF9lS/9il4MxKunJ+ZKvh39WGo= +k8s.io/cloud-provider-aws v1.29.1 h1:v/17MNk1l79IkzDw/njmzKq0HHq3p9UvY9VmqJC7gmg= 
+k8s.io/cloud-provider-aws v1.29.1/go.mod h1:NV7fBE8dddkF1wa7v+ljEue2AKi5VsFgMtZ9kTRnB1c= k8s.io/cloud-provider-gcp/providers v0.28.2 h1:I65pFTLNMQSj7YuW3Mg3pZIXmw0naCmF6TGAuz4/sZE= k8s.io/cloud-provider-gcp/providers v0.28.2/go.mod h1:P8dxRvvLtX7xUwVUzA/QOqv8taCzBaVsVMnjnpjmYXE= k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU= k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc= -k8s.io/csi-translation-lib v0.28.3 h1:7deV+HZjV418AGikSDPW8dyzTpm4K3tNbQUp3KmR7cs= -k8s.io/csi-translation-lib v0.28.3/go.mod h1:zlrYwakCz2yji9/8EaJk+afIKPrYXPNXXLDO8DVuuTk= +k8s.io/csi-translation-lib v0.29.0 h1:we4X1yUlDikvm5Rv0dwMuPHNw6KwjwsQiAuOPWXha8M= +k8s.io/csi-translation-lib v0.29.0/go.mod h1:Cp6t3CNBSm1dXS17V8IImUjkqfIB6KCj8Fs8wf6uyTA= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= diff --git a/tests/e2e/go.mod b/tests/e2e/go.mod index 678ea30286..f9ba13ae91 100644 --- a/tests/e2e/go.mod +++ b/tests/e2e/go.mod @@ -65,7 +65,7 @@ require ( github.com/aliyun/credentials-go v1.2.3 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go v1.48.15 // indirect + github.com/aws/aws-sdk-go v1.49.13 // indirect github.com/aws/aws-sdk-go-v2 v1.18.1 // indirect github.com/aws/aws-sdk-go-v2/config v1.18.27 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.13.26 // indirect @@ -106,6 +106,7 @@ require ( github.com/emicklei/proto v1.10.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/ghodss/yaml v1.0.0 // indirect @@ -115,7 +116,7 @@ require ( github.com/go-git/go-git/v5 v5.11.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-jose/go-jose/v3 v3.0.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.21.4 // indirect @@ -149,7 +150,7 @@ require ( github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea // indirect github.com/google/licenseclassifier/v2 v2.0.0 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/google/uuid v1.4.0 // indirect + github.com/google/uuid v1.5.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gophercloud/gophercloud v1.8.0 // indirect @@ -176,7 +177,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // 
indirect @@ -201,10 +202,10 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pkg/sftp v1.13.6 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.17.0 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/protocolbuffers/txtpbfmt v0.0.0-20220428173112-74888fd59c2b // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -227,7 +228,7 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/cobra v1.8.0 // indirect - github.com/spf13/viper v1.18.0 // indirect + github.com/spf13/viper v1.18.2 // indirect github.com/spiffe/go-spiffe/v2 v2.1.6 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect @@ -248,6 +249,8 @@ require ( gitlab.alpinelinux.org/alpine/go v0.7.0 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect @@ -256,8 +259,8 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect go4.org v0.0.0-20201209231011-d4a079459e60 // indirect - golang.org/x/crypto v0.16.0 // indirect - golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/exp v0.0.0-20231226003508-02704c960a9b // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.19.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect @@ -269,13 +272,13 @@ require ( golang.org/x/tools v0.16.0 // indirect golang.org/x/tools/go/vcs v0.1.0-deprecated // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/api v0.153.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/api v0.154.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/gcfg.v1 v1.2.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect @@ -283,7 +286,7 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect 
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cloud-provider v0.28.3 // indirect + k8s.io/cloud-provider v0.29.0 // indirect k8s.io/cloud-provider-gcp/providers v0.28.2 // indirect k8s.io/component-base v0.29.0 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect diff --git a/tests/e2e/go.sum b/tests/e2e/go.sum index 5861f06aea..007a17da18 100644 --- a/tests/e2e/go.sum +++ b/tests/e2e/go.sum @@ -167,8 +167,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.48.15 h1:Gad2C4pLzuZDd5CA0Rvkfko6qUDDTOYru145gkO7w/Y= -github.com/aws/aws-sdk-go v1.48.15/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.13 h1:f4mGztsgnx2dR9r8FQYa9YW/RsKb+N7bgef4UGrOW1Y= +github.com/aws/aws-sdk-go v1.49.13/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250= github.com/aws/aws-sdk-go-v2 v1.14.0/go.mod h1:ZA3Y8V0LrlWj63MQAnRHgKf/5QB//LSZCPNWlWrNGLU= github.com/aws/aws-sdk-go-v2 v1.18.1 h1:+tefE750oAb7ZQGzla6bLkOwfcQCEtC5y2RqoqCeqKo= @@ -251,6 +251,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= @@ -323,6 +325,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= @@ -334,6 +338,8 @@ github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52/go.mod h1:yIquW8 github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -372,8 +378,9 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -559,8 +566,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -715,8 +722,8 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.55 
h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= @@ -818,21 +825,21 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/protocolbuffers/txtpbfmt v0.0.0-20220428173112-74888fd59c2b h1:zd/2RNzIRkoGGMjE+YIsZ85CnDIz672JK2F3Zl4vux4= github.com/protocolbuffers/txtpbfmt v0.0.0-20220428173112-74888fd59c2b/go.mod h1:KjY0wibdYKc4DYkerHSbguaf3JeIPGhNJBp2BNiFH78= @@ -931,8 +938,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper 
v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.18.0 h1:pN6W1ub/G4OfnM+NR9p7xP9R6TltLUzp5JG9yZD3Qg0= -github.com/spf13/viper v1.18.0/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/spiffe/go-spiffe/v2 v2.1.6 h1:4SdizuQieFyL9eNU+SPiCArH4kynzaKOOj0VvM8R7Xo= github.com/spiffe/go-spiffe/v2 v2.1.6/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1037,6 +1044,10 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= @@ -1084,8 +1095,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1094,8 +1105,8 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4= +golang.org/x/exp 
v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1251,6 +1262,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -1317,15 +1329,15 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4= -google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= +google.golang.org/api v0.154.0 h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050= +google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1340,12 +1352,12 @@ google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= -google.golang.org/genproto 
v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= -google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1356,8 +1368,8 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1369,8 +1381,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/statsd.v2 v2.0.0 
h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= gopkg.in/alexcesaro/statsd.v2 v2.0.0/go.mod h1:i0ubccKGzBVNBpdGV5MocxyA/XlLUJzA7SLonnE4drU= @@ -1426,8 +1438,8 @@ k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= -k8s.io/cloud-provider v0.28.3 h1:9u+JjA3zIn0nqLOOa8tWnprFkffguSAhfBvo8p7LhBQ= -k8s.io/cloud-provider v0.28.3/go.mod h1:shAJxdrKu+SwwGUhkodxByPjaH8KBFZqXo6jU1F0ehI= +k8s.io/cloud-provider v0.29.0 h1:Qgk/jHsSKGRk/ltTlN6e7eaNuuamLROOzVBd0RPp94M= +k8s.io/cloud-provider v0.29.0/go.mod h1:gBCt7YYKFV4oUcJ/0xF9lS/9il4MxKunJ+ZKvh39WGo= k8s.io/cloud-provider-gcp/providers v0.28.2 h1:I65pFTLNMQSj7YuW3Mg3pZIXmw0naCmF6TGAuz4/sZE= k8s.io/cloud-provider-gcp/providers v0.28.2/go.mod h1:P8dxRvvLtX7xUwVUzA/QOqv8taCzBaVsVMnjnpjmYXE= k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 776e31b21d..c483e0cb8e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -442,6 +442,17 @@ func (c *Config) WithUseDualStack(enable bool) *Config { return c } +// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config +// pointer for chaining. +func (c *Config) WithUseFIPSEndpoint(enable bool) *Config { + if enable { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } else { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled + } + return c +} + // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value // returning a Config pointer for chaining. func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index cdf456abe1..fdd6c505cd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -31,6 +31,7 @@ const ( ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). CaCentral1RegionID = "ca-central-1" // Canada (Central). + CaWest1RegionID = "ca-west-1" // Canada West (Calgary). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). EuCentral2RegionID = "eu-central-2" // Europe (Zurich). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). 
@@ -190,6 +191,9 @@ var awsPartition = partition{ "ca-central-1": region{ Description: "Canada (Central)", }, + "ca-west-1": region{ + Description: "Canada West (Calgary)", + }, "eu-central-1": region{ Description: "Europe (Frankfurt)", }, @@ -291,6 +295,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -477,6 +484,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -1269,6 +1294,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "api.ecr.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "dkr-us-east-1", }: endpoint{ @@ -1953,6 +1986,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2251,6 +2287,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "apigateway-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2284,6 +2329,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -2442,6 +2496,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2530,6 +2587,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2735,6 +2795,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -3526,6 +3589,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4005,6 +4071,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4038,6 +4113,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", 
+ }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -4485,6 +4569,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5028,6 +5115,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -5092,6 +5182,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5125,6 +5224,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -5280,6 +5388,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5439,6 +5550,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5570,6 +5684,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -6153,6 +6270,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -6618,6 +6738,9 @@ var awsPartition = partition{ }, "cognito-identity": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6636,6 +6759,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6742,6 +6868,9 @@ var awsPartition = partition{ }, "cognito-idp": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6760,6 +6889,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7327,6 +7459,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8298,6 +8433,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "datasync-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "datasync-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8331,6 +8475,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -8496,6 +8649,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "datazone-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "datazone.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -8816,6 +8974,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8989,6 +9150,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9013,6 +9177,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9074,6 +9241,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "dms", }: endpoint{ @@ -9389,6 +9559,42 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", }: endpoint{}, @@ -9404,15 +9610,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + }, }, }, "ds": service{ @@ -9459,6 +9689,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ds-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "ds-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9492,6 +9731,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -9636,6 +9884,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9799,6 +10065,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ebs-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9832,6 +10107,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -10160,6 +10444,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10341,6 +10628,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10451,6 +10741,166 @@ var awsPartition = partition{ }, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "eks-auth.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "eks-auth.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "eks-auth.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "eks-auth.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + 
Hostname: "eks-auth.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "eks-auth.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "eks-auth.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "eks-auth.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "eks-auth.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "eks-auth.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "eks-auth.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "eks-auth.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "eks-auth.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "eks-auth.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "eks-auth.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "eks-auth.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "eks-auth.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "eks-auth.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "eks-auth.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "eks-auth.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "eks-auth.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "eks-auth.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "eks-auth.us-west-2.api.aws", + }, + }, + }, "elasticache": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -10489,6 +10939,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11292,6 +11745,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11452,6 +11908,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -11487,6 +11952,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -12004,6 +12478,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12172,6 +12649,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "aos.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "aos.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -12425,6 +12911,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -12675,6 +13164,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14666,6 +15158,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14693,6 +15188,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -15147,6 +15645,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "internetmonitor.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -16695,6 +17198,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking-fips.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{ @@ -16823,6 +17331,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17300,6 +17811,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17848,6 +18377,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18240,6 +18778,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18582,6 +19123,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19102,12 +19646,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19126,6 +19676,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: 
"me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -19390,6 +19943,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19445,6 +20001,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19500,6 +20059,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19857,6 +20419,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -20290,6 +20855,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -20498,6 +21066,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20976,6 +21547,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21763,6 +22337,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22568,6 +23145,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "qbusiness.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "qbusiness.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -22840,6 +23422,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ram-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22873,6 +23464,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -23003,6 +23603,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "rbin-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23036,6 +23645,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -23175,6 +23793,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: 
"ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23217,6 +23853,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "rds-fips.ca-west-1", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "rds-fips.us-east-1", }: endpoint{ @@ -23271,6 +23916,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "rds.ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "rds.us-east-1", }: endpoint{ @@ -23573,6 +24236,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "redshift-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23606,6 +24278,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -24249,6 +24930,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -24404,6 +25088,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -24413,18 +25100,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24434,6 +25130,48 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: 
"rolesanywhere-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -24443,15 +25181,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + }, }, }, "route53": service{ @@ -24548,6 +25310,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -24788,6 +25553,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25065,6 +25833,27 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25150,6 +25939,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -26394,6 +27192,27 @@ var awsPartition = partition{ Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -26602,6 +27421,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -27058,6 +27880,9 @@ 
var awsPartition = partition{ }: endpoint{ Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -27581,6 +28406,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28549,6 +29377,15 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28573,6 +29410,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -28703,6 +29549,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28860,6 +29709,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ssm-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28893,6 +29751,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -29477,6 +30344,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29796,6 +30666,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29902,6 +30775,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30076,6 +30952,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30224,6 +31103,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30372,6 +31254,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -33298,6 +34183,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -34113,6 
+35001,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "elasticache": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34772,6 +35685,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36452,6 +37375,13 @@ var awsusgovPartition = partition{ }, }, }, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "cassandra": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37664,6 +38594,31 @@ var awsusgovPartition = partition{ }, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-west-1.api.aws", + }, + }, + }, "elasticache": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -40361,12 +41316,42 @@ var awsusgovPartition = partition{ }, "rolesanywhere": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + }, }, }, "route53": service{ @@ -42791,12 +43776,42 @@ var awsisoPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "rbin": service{ @@ -43021,15 +44036,61 @@ var awsisoPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, }, }, "secretsmanager": service{ @@ -43661,9 +44722,24 @@ var awsisobPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "rbin": service{ @@ -43802,9 +44878,30 @@ var awsisobPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "secretsmanager": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index fc04865b00..60703e5b94 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.48.15" +const SDKVersion = "1.49.13" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go 
index b2008fba6c..ccc59011db 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -58035,6 +58035,25 @@ type AdvertiseByoipCidrInput struct { // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + + // If you have Local Zones (https://docs.aws.amazon.com/local-zones/latest/ug/how-local-zones-work.html) + // enabled, you can choose a network border group for Local Zones when you provision + // and advertise a BYOIPv4 CIDR. Choose the network border group carefully as + // the EIP and the Amazon Web Services resource it is associated with must reside + // in the same network border group. + // + // You can provision BYOIP address ranges to and advertise them in the following + // Local Zone network border groups: + // + // * us-east-1-dfw-2 + // + // * us-west-2-lax-1 + // + // * us-west-2-phx-2 + // + // You cannot provision or advertise BYOIPv6 address ranges in Local Zones at + // this time. + NetworkBorderGroup *string `type:"string"` } // String returns the string representation. @@ -58086,6 +58105,12 @@ func (s *AdvertiseByoipCidrInput) SetDryRun(v bool) *AdvertiseByoipCidrInput { return s } +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *AdvertiseByoipCidrInput) SetNetworkBorderGroup(v string) *AdvertiseByoipCidrInput { + s.NetworkBorderGroup = &v + return s +} + type AdvertiseByoipCidrOutput struct { _ struct{} `type:"structure"` @@ -64318,6 +64343,25 @@ type ByoipCidr struct { // The description of the address range. Description *string `locationName:"description" type:"string"` + // If you have Local Zones (https://docs.aws.amazon.com/local-zones/latest/ug/how-local-zones-work.html) + // enabled, you can choose a network border group for Local Zones when you provision + // and advertise a BYOIPv4 CIDR. Choose the network border group carefully as + // the EIP and the Amazon Web Services resource it is associated with must reside + // in the same network border group. + // + // You can provision BYOIP address ranges to and advertise them in the following + // Local Zone network border groups: + // + // * us-east-1-dfw-2 + // + // * us-west-2-lax-1 + // + // * us-west-2-phx-2 + // + // You cannot provision or advertise BYOIPv6 address ranges in Local Zones at + // this time. + NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` + // The state of the address pool. State *string `locationName:"state" type:"string" enum:"ByoipCidrState"` @@ -64362,6 +64406,12 @@ func (s *ByoipCidr) SetDescription(v string) *ByoipCidr { return s } +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *ByoipCidr) SetNetworkBorderGroup(v string) *ByoipCidr { + s.NetworkBorderGroup = &v + return s +} + // SetState sets the State field's value. func (s *ByoipCidr) SetState(v string) *ByoipCidr { s.State = &v @@ -135752,6 +135802,11 @@ type InstanceNetworkInterfaceSpecification struct { // for eth0, and can only be assigned to a new network interface, not an existing // one. You cannot specify more than one network interface in the request. If // launching into a default subnet, the default value is true. + // + // Starting on February 1, 2024, Amazon Web Services will charge for all public + // IPv4 addresses, including public IPv4 addresses associated with running instances + // and Elastic IP addresses. 
For more information, see the Public IPv4 Address + // tab on the Amazon VPC pricing page (http://aws.amazon.com/vpc/pricing/). AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` // A security group connection tracking specification that enables you to set @@ -142771,20 +142826,18 @@ type LaunchTemplateInstanceMetadataOptions struct { // Possible values: Integers from 1 to 64 HttpPutResponseHopLimit *int64 `locationName:"httpPutResponseHopLimit" type:"integer"` - // Indicates whether IMDSv2 is optional or required. + // Indicates whether IMDSv2 is required. // - // optional - When IMDSv2 is optional, you can choose to retrieve instance metadata - // with or without a session token in your request. If you retrieve the IAM - // role credentials without a token, the IMDSv1 role credentials are returned. - // If you retrieve the IAM role credentials using a valid session token, the - // IMDSv2 role credentials are returned. + // * optional - IMDSv2 is optional. You can choose whether to send a session + // token in your instance metadata retrieval requests. If you retrieve IAM + // role credentials without a session token, you receive the IMDSv1 role + // credentials. If you retrieve IAM role credentials using a valid session + // token, you receive the IMDSv2 role credentials. // - // required - When IMDSv2 is required, you must send a session token with any - // instance metadata retrieval requests. In this state, retrieving the IAM role - // credentials always returns IMDSv2 credentials; IMDSv1 credentials are not - // available. - // - // Default: optional + // * required - IMDSv2 is required. You must send a session token in your + // instance metadata retrieval requests. With this option, retrieving the + // IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials + // are not available. HttpTokens *string `locationName:"httpTokens" type:"string" enum:"LaunchTemplateHttpTokensState"` // Set to enabled to allow access to instance tags from the instance metadata. @@ -142884,22 +142937,21 @@ type LaunchTemplateInstanceMetadataOptionsRequest struct { // Possible values: Integers from 1 to 64 HttpPutResponseHopLimit *int64 `type:"integer"` - // IMDSv2 uses token-backed sessions. Set the use of HTTP tokens to optional - // (in other words, set the use of IMDSv2 to optional) or required (in other - // words, set the use of IMDSv2 to required). + // Indicates whether IMDSv2 is required. // - // * optional - When IMDSv2 is optional, you can choose to retrieve instance - // metadata with or without a session token in your request. If you retrieve - // the IAM role credentials without a token, the IMDSv1 role credentials - // are returned. If you retrieve the IAM role credentials using a valid session - // token, the IMDSv2 role credentials are returned. + // * optional - IMDSv2 is optional. You can choose whether to send a session + // token in your instance metadata retrieval requests. If you retrieve IAM + // role credentials without a session token, you receive the IMDSv1 role + // credentials. If you retrieve IAM role credentials using a valid session + // token, you receive the IMDSv2 role credentials. // - // * required - When IMDSv2 is required, you must send a session token with - // any instance metadata retrieval requests. In this state, retrieving the + // * required - IMDSv2 is required. You must send a session token in your + // instance metadata retrieval requests. 
With this option, retrieving the // IAM role credentials always returns IMDSv2 credentials; IMDSv1 credentials // are not available. // - // Default: optional + // Default: If the value of ImdsSupport for the Amazon Machine Image (AMI) for + // your instance is v2.0, the default is required. HttpTokens *string `type:"string" enum:"LaunchTemplateHttpTokensState"` // Set to enabled to allow access to instance tags from the instance metadata. @@ -142974,6 +143026,11 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct { // Indicates whether to associate a public IPv4 address with eth0 for a new // network interface. + // + // Starting on February 1, 2024, Amazon Web Services will charge for all public + // IPv4 addresses, including public IPv4 addresses associated with running instances + // and Elastic IP addresses. For more information, see the Public IPv4 Address + // tab on the Amazon VPC pricing page (http://aws.amazon.com/vpc/pricing/). AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` // A security group connection tracking specification that enables you to set @@ -143210,6 +143267,11 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { AssociateCarrierIpAddress *bool `type:"boolean"` // Associates a public IPv4 address with eth0 for a new network interface. + // + // Starting on February 1, 2024, Amazon Web Services will charge for all public + // IPv4 addresses, including public IPv4 addresses associated with running instances + // and Elastic IP addresses. For more information, see the Public IPv4 Address + // tab on the Amazon VPC pricing page (http://aws.amazon.com/vpc/pricing/). AssociatePublicIpAddress *bool `type:"boolean"` // A security group connection tracking specification that enables you to set @@ -161174,6 +161236,25 @@ type ProvisionByoipCidrInput struct { // Reserved. MultiRegion *bool `type:"boolean"` + // If you have Local Zones (https://docs.aws.amazon.com/local-zones/latest/ug/how-local-zones-work.html) + // enabled, you can choose a network border group for Local Zones when you provision + // and advertise a BYOIPv4 CIDR. Choose the network border group carefully as + // the EIP and the Amazon Web Services resource it is associated with must reside + // in the same network border group. + // + // You can provision BYOIP address ranges to and advertise them in the following + // Local Zone network border groups: + // + // * us-east-1-dfw-2 + // + // * us-west-2-lax-1 + // + // * us-west-2-phx-2 + // + // You cannot provision or advertise BYOIPv6 address ranges in Local Zones at + // this time. + NetworkBorderGroup *string `type:"string"` + // The tags to apply to the address pool. PoolTagSpecifications []*TagSpecification `locationName:"PoolTagSpecification" locationNameList:"item" type:"list"` @@ -161250,6 +161331,12 @@ func (s *ProvisionByoipCidrInput) SetMultiRegion(v bool) *ProvisionByoipCidrInpu return s } +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *ProvisionByoipCidrInput) SetNetworkBorderGroup(v string) *ProvisionByoipCidrInput { + s.NetworkBorderGroup = &v + return s +} + // SetPoolTagSpecifications sets the PoolTagSpecifications field's value. 
func (s *ProvisionByoipCidrInput) SetPoolTagSpecifications(v []*TagSpecification) *ProvisionByoipCidrInput { s.PoolTagSpecifications = v @@ -165874,23 +165961,7 @@ type RequestLaunchTemplateData struct { SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"SecurityGroup" type:"list"` // The tags to apply to the resources that are created during instance launch. - // - // You can specify tags for the following resources only: - // - // * Instances - // - // * Volumes - // - // * Elastic graphics - // - // * Spot Instance requests - // - // * Network interfaces - // - // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). - // - // To tag the launch template itself, you must use the TagSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) - // parameter. + // These tags are not applied to the launch template. TagSpecifications []*LaunchTemplateTagSpecificationRequest `locationName:"TagSpecification" locationNameList:"LaunchTemplateTagSpecificationRequest" type:"list"` // The user data to make available to the instance. You must provide base64-encoded @@ -194347,6 +194418,33 @@ const ( // InstanceTypeDl2q24xlarge is a InstanceType enum value InstanceTypeDl2q24xlarge = "dl2q.24xlarge" + + // InstanceTypeMac2M2Metal is a InstanceType enum value + InstanceTypeMac2M2Metal = "mac2-m2.metal" + + // InstanceTypeI4i12xlarge is a InstanceType enum value + InstanceTypeI4i12xlarge = "i4i.12xlarge" + + // InstanceTypeI4i24xlarge is a InstanceType enum value + InstanceTypeI4i24xlarge = "i4i.24xlarge" + + // InstanceTypeC7iMetal24xl is a InstanceType enum value + InstanceTypeC7iMetal24xl = "c7i.metal-24xl" + + // InstanceTypeC7iMetal48xl is a InstanceType enum value + InstanceTypeC7iMetal48xl = "c7i.metal-48xl" + + // InstanceTypeM7iMetal24xl is a InstanceType enum value + InstanceTypeM7iMetal24xl = "m7i.metal-24xl" + + // InstanceTypeM7iMetal48xl is a InstanceType enum value + InstanceTypeM7iMetal48xl = "m7i.metal-48xl" + + // InstanceTypeR7iMetal24xl is a InstanceType enum value + InstanceTypeR7iMetal24xl = "r7i.metal-24xl" + + // InstanceTypeR7iMetal48xl is a InstanceType enum value + InstanceTypeR7iMetal48xl = "r7i.metal-48xl" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -195124,6 +195222,15 @@ func InstanceType_Values() []string { InstanceTypeR7i24xlarge, InstanceTypeR7i48xlarge, InstanceTypeDl2q24xlarge, + InstanceTypeMac2M2Metal, + InstanceTypeI4i12xlarge, + InstanceTypeI4i24xlarge, + InstanceTypeC7iMetal24xl, + InstanceTypeC7iMetal48xl, + InstanceTypeM7iMetal24xl, + InstanceTypeM7iMetal48xl, + InstanceTypeR7iMetal24xl, + InstanceTypeR7iMetal48xl, } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go index fa9401a0ea..389613848b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -1360,10 +1360,10 @@ func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProvi // Amazon Web Services secures communication with some OIDC identity providers // (IdPs) through our library of trusted root certificate authorities (CAs) // instead of using a certificate thumbprint to verify your IdP server certificate. -// These OIDC IdPs include Auth0, GitHub, Google, and those that use an Amazon -// S3 bucket to host a JSON Web Key Set (JWKS) endpoint. 
In these cases, your -// legacy thumbprint remains in your configuration, but is no longer used for -// validation. +// In these cases, your legacy thumbprint remains in your configuration, but +// is no longer used for validation. These OIDC IdPs include Auth0, GitHub, +// GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web +// Key Set (JWKS) endpoint. // // The trust for the OIDC provider is derived from the IAM provider that this // operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider @@ -16953,10 +16953,10 @@ func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDCo // Amazon Web Services secures communication with some OIDC identity providers // (IdPs) through our library of trusted root certificate authorities (CAs) // instead of using a certificate thumbprint to verify your IdP server certificate. -// These OIDC IdPs include Auth0, GitHub, Google, and those that use an Amazon -// S3 bucket to host a JSON Web Key Set (JWKS) endpoint. In these cases, your -// legacy thumbprint remains in your configuration, but is no longer used for -// validation. +// In these cases, your legacy thumbprint remains in your configuration, but +// is no longer used for validation. These OIDC IdPs include Auth0, GitHub, +// GitLab, Google, and those that use an Amazon S3 bucket to host a JSON Web +// Key Set (JWKS) endpoint. // // Trust for the OIDC provider is derived from the provider certificate and // is validated by the thumbprint. Therefore, it is best to limit access to diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index 6d70114087..90fad2676f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -75,6 +75,9 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ // // Related operations: ScheduleKeyDeletion // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -270,6 +273,9 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // // - UpdateCustomKeyStore // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -470,6 +476,9 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, // // - UpdateAlias // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
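The "Eventual consistency" notes added throughout the KMS doc comments above suggest callers should tolerate brief propagation delay after mutating calls. A hedged sketch of one way to do that (an illustrative retry pattern, not taken from the SDK or this change; assumes the usual session setup):

// Sketch only: retry a follow-up read briefly if a newly created key is not
// yet visible, instead of failing on the first NotFoundException.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()), aws.NewConfig().WithRegion("us-east-1"))

	created, err := svc.CreateKey(&kms.CreateKeyInput{Description: aws.String("example key")})
	if err != nil {
		panic(err)
	}
	keyID := created.KeyMetadata.KeyId

	// The new key may not be visible to other KMS operations immediately;
	// back off and retry on NotFoundException for a short window.
	for attempt := 0; attempt < 5; attempt++ {
		_, err = svc.DescribeKey(&kms.DescribeKeyInput{KeyId: keyID})
		if err == nil {
			break
		}
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == kms.ErrCodeNotFoundException {
			time.Sleep(time.Duration(attempt+1) * time.Second)
			continue
		}
		break
	}
	fmt.Println("key ready:", aws.StringValue(keyID), "err:", err)
}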
@@ -664,6 +673,9 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req // // - UpdateCustomKeyStore // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -791,7 +803,7 @@ func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req // - XksProxyVpcEndpointServiceInvalidConfigurationException // The request was rejected because the Amazon VPC endpoint service configuration // does not fulfill the requirements for an external key store proxy. For details, -// see the exception message and review the requirements (kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) +// see the exception message and review the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) // for Amazon VPC endpoint service connectivity for an external key store. // // - XksProxyInvalidResponseException @@ -922,6 +934,9 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, // // - RevokeGrant // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1218,6 +1233,9 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // // - ScheduleKeyDeletion // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1462,10 +1480,10 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output // or any Amazon Web Services SDK. Use the Recipient parameter to provide the // attestation document for the enclave. Instead of the plaintext data, the // response includes the plaintext data encrypted with the public key from the -// attestation document (CiphertextForRecipient).For information about the interaction -// between KMS and Amazon Web Services Nitro Enclaves, see How Amazon Web Services -// Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide.. +// attestation document (CiphertextForRecipient). For information about the +// interaction between KMS and Amazon Web Services Nitro Enclaves, see How Amazon +// Web Services Nitro Enclaves uses KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide. // // The KMS key that you use for this operation must be in a compatible key state. 
// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -1488,6 +1506,9 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output // // - ReEncrypt // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1672,6 +1693,9 @@ func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, // // - UpdateAlias // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1827,6 +1851,9 @@ func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req // // - UpdateCustomKeyStore // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1971,6 +1998,9 @@ func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialI // // - ImportKeyMaterial // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2140,6 +2170,9 @@ func (c *KMS) DescribeCustomKeyStoresRequest(input *DescribeCustomKeyStoresInput // // - UpdateCustomKeyStore // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2288,11 +2321,11 @@ func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request, // signing, or generating and verifying MACs) and the algorithms that the KMS // key supports. // -// For multi-Region keys (kms/latest/developerguide/multi-region-keys-overview.html), +// For multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html), // DescribeKey displays the primary key and all related replica keys. 
For KMS -// keys in CloudHSM key stores (kms/latest/developerguide/keystore-cloudhsm.html), +// keys in CloudHSM key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-cloudhsm.html), // it includes information about the key store, such as the key store ID and -// the CloudHSM cluster ID. For KMS keys in external key stores (kms/latest/developerguide/keystore-external.html), +// the CloudHSM cluster ID. For KMS keys in external key stores (https://docs.aws.amazon.com/kms/latest/developerguide/keystore-external.html), // it includes the custom key store ID and the ID of the external key. // // DescribeKey does not return the following information: @@ -2338,6 +2371,9 @@ func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request, // // - ListRetirableGrants // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2448,6 +2484,9 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o // // Related operations: EnableKey // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2593,6 +2632,9 @@ func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *re // // - GetKeyRotationStatus // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2752,6 +2794,9 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp // // - UpdateCustomKeyStore // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2879,6 +2924,9 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out // // Related operations: DisableKey // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
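The recurring "Returns awserr.Error ... use runtime type assertions" guidance in these doc comments refers to the SDK's error interface. A brief hedged sketch of that pattern follows; the DisableKey call and key ID are placeholders chosen for illustration.

```go
// Sketch: inspecting a service error via the awserr.Error interface,
// as the generated documentation suggests. The key ID is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	_, err := svc.DisableKey(&kms.DisableKeyInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			// Code and Message carry the service-level error details.
			switch aerr.Code() {
			case kms.ErrCodeNotFoundException:
				fmt.Println("key does not exist:", aerr.Message())
			case kms.ErrCodeInvalidArnException:
				fmt.Println("key identifier is not valid:", aerr.Message())
			default:
				fmt.Println(aerr.Code(), aerr.Message())
			}
			return
		}
		fmt.Println("non-service error:", err)
	}
}
```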
@@ -2994,7 +3042,7 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ // Enables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) // of the specified symmetric encryption KMS key. // -// When you enable automatic rotation of acustomer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), +// When you enable automatic rotation of a customer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), // KMS rotates the key material of the KMS key one year (approximately 365 days) // from the enable date and every year thereafter. You can monitor rotation // of the key material for your KMS keys in CloudTrail and Amazon CloudWatch. @@ -3043,6 +3091,9 @@ func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *requ // // - GetKeyRotationStatus // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3220,6 +3271,9 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output // // - GenerateDataKeyPair // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3445,6 +3499,9 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // // - GenerateDataKeyWithoutPlaintext // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3582,8 +3639,8 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req * // private key that is encrypted under the symmetric encryption KMS key you // specify. You can use the data key pair to perform asymmetric cryptography // and implement digital signatures outside of KMS. The bytes in the keys are -// random; they not related to the caller or to the KMS key that is used to -// encrypt the private key. +// random; they are not related to the caller or to the KMS key that is used +// to encrypt the private key. // // You can use the public key that GenerateDataKeyPair returns to encrypt data // or verify a signature outside of KMS. Then, store the encrypted private key @@ -3660,6 +3717,9 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req * // // - GenerateDataKeyWithoutPlaintext // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). 
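GenerateDataKeyPair, documented in this hunk, returns a plaintext public key plus a private key encrypted under the specified symmetric KMS key. A small sketch of the described workflow follows; the key alias and the decision to parse the public key locally are assumptions for illustration only.

```go
// Sketch: generate an RSA data key pair for use outside of KMS, keep only
// the encrypted private key, and parse the public key for local use.
package datakeys

import (
	"crypto/x509"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

func newDataKeyPair(svc *kms.KMS) (pub interface{}, encryptedPriv []byte, err error) {
	out, err := svc.GenerateDataKeyPair(&kms.GenerateDataKeyPairInput{
		KeyId:       aws.String("alias/example-key"), // placeholder alias
		KeyPairSpec: aws.String(kms.DataKeyPairSpecRsa2048),
	})
	if err != nil {
		return nil, nil, err
	}
	// PublicKey is DER-encoded (SubjectPublicKeyInfo) and can be used to
	// encrypt or verify outside of KMS.
	pub, err = x509.ParsePKIXPublicKey(out.PublicKey)
	if err != nil {
		return nil, nil, err
	}
	// Persist only the encrypted private key; call Decrypt when it is needed.
	return pub, out.PrivateKeyCiphertextBlob, nil
}
```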
+// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3854,6 +3914,9 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP // // - GenerateDataKeyWithoutPlaintext // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4060,6 +4123,9 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho // // - GenerateDataKeyPairWithoutPlaintext // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4226,6 +4292,9 @@ func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request, // // Related operations: VerifyMac // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4383,6 +4452,9 @@ func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Re // Required permissions: kms:GenerateRandom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4510,7 +4582,10 @@ func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Reques // Required permissions: kms:GetKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (key policy) // -// Related operations: PutKeyPolicy +// Related operations: PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) +// +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4676,6 +4751,9 @@ func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req // // - EnableKeyRotation // +// Eventual consistency: The KMS API follows an eventual consistency model. 
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4800,11 +4878,11 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput) // Origin value of EXTERNAL to create a KMS key with no key material. You can // import key material for a symmetric encryption KMS key, HMAC KMS key, asymmetric // encryption KMS key, or asymmetric signing KMS key. You can also import key -// material into a multi-Region key (kms/latest/developerguide/multi-region-keys-overview.html) +// material into a multi-Region key (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) // of any supported type. However, you can't import key material into a KMS -// key in a custom key store (kms/latest/developerguide/custom-key-store-overview.html). +// key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). // You can also use GetParametersForImport to get a public key and import token -// to reimport the original key material (kms/latest/developerguide/importing-keys.html#reimport-key-material) +// to reimport the original key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material) // into a KMS key whose key material expired or was deleted. // // GetParametersForImport returns the items that you need to import your key @@ -4853,6 +4931,9 @@ func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput) // // - DeleteImportedKeyMaterial // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5017,6 +5098,9 @@ func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Reques // // Related operations: CreateKey // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5256,6 +5340,9 @@ func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *requ // // - GetParametersForImport // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5431,6 +5518,9 @@ func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, // // - UpdateAlias // +// Eventual consistency: The KMS API follows an eventual consistency model. 
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5615,6 +5705,9 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o // // - RevokeGrant // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5799,7 +5892,10 @@ func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request. // // - GetKeyPolicy // -// - PutKeyPolicy +// - PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) +// +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5983,6 +6079,9 @@ func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, outpu // // - ListResourceTags // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6149,6 +6248,9 @@ func (c *KMS) ListResourceTagsRequest(input *ListResourceTagsInput) (req *reques // // - UntagResource // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6311,14 +6413,22 @@ func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req * // grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). // // Cross-account use: You must specify a principal in your Amazon Web Services -// account. However, this operation can return grants in any Amazon Web Services -// account. You do not need kms:ListRetirableGrants permission (or any other -// additional permission) in any Amazon Web Services account other than your -// own. +// account. This operation returns a list of grants where the retiring principal +// specified in the ListRetirableGrants request is the same retiring principal +// on the grant. 
This can include grants on KMS keys owned by other Amazon Web +// Services accounts, but you do not need kms:ListRetirableGrants permission +// (or any other additional permission) in any Amazon Web Services account other +// than your own. // // Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) // (IAM policy) in your Amazon Web Services account. // +// KMS authorizes ListRetirableGrants requests by evaluating the caller account's +// kms:ListRetirableGrants permissions. The authorized resource in ListRetirableGrants +// calls is the retiring principal specified in the request. KMS does not evaluate +// the caller's permissions to verify their access to any KMS keys or grants +// that might be returned by the ListRetirableGrants call. +// // Related operations: // // - CreateGrant @@ -6329,6 +6439,9 @@ func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req * // // - RevokeGrant // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6493,6 +6606,9 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques // // Related operations: GetKeyPolicy // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6697,6 +6813,9 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out // // - GenerateDataKeyPair // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6900,7 +7019,7 @@ func (c *KMS) ReplicateKeyRequest(input *ReplicateKeyInput) (req *request.Reques // If you replicate a multi-Region primary key with imported key material, the // replica key is created with no key material. You must import the same key // material that you imported into the primary key. For details, see Importing -// key material into multi-Region keys (kms/latest/developerguide/multi-region-keys-import.html) +// key material into multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html) // in the Key Management Service Developer Guide. // // To convert a replica key to a primary key, use the UpdatePrimaryRegion operation. @@ -6927,6 +7046,9 @@ func (c *KMS) ReplicateKeyRequest(input *ReplicateKeyInput) (req *request.Reques // // - UpdatePrimaryRegion // +// Eventual consistency: The KMS API follows an eventual consistency model. 
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7073,7 +7195,7 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, // Cross-account use: Yes. You can retire a grant on a KMS key in a different // Amazon Web Services account. // -// Required permissions::Permission to retire a grant is determined primarily +// Required permissions: Permission to retire a grant is determined primarily // by the grant. For details, see Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) // in the Key Management Service Developer Guide. // @@ -7087,6 +7209,9 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, // // - RevokeGrant // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7236,6 +7361,9 @@ func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request, // // - RetireGrant // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7360,7 +7488,7 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req * // // Deleting a KMS key is a destructive and potentially dangerous operation. // When a KMS key is deleted, all data that was encrypted under the KMS key -// is unrecoverable. (The only exception is a multi-Region replica key (kms/latest/developerguide/multi-region-keys-delete.html), +// is unrecoverable. (The only exception is a multi-Region replica key (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html), // or an asymmetric or HMAC KMS key with imported key material (kms/latest/developerguide/importing-keys-managing.html#import-delete-key).) // To prevent the use of a KMS key without deleting it, use DisableKey. // @@ -7406,6 +7534,9 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req * // // - DisableKey // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7566,6 +7697,9 @@ func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignO // // Related operations: Verify // +// Eventual consistency: The KMS API follows an eventual consistency model. 
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7744,6 +7878,9 @@ func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request, // // - UntagResource // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7892,6 +8029,9 @@ func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ // // - TagResource // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8050,6 +8190,9 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, // // - ListAliases // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8240,6 +8383,9 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req // // - DisconnectCustomKeyStore // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8390,7 +8536,7 @@ func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req // - XksProxyVpcEndpointServiceInvalidConfigurationException // The request was rejected because the Amazon VPC endpoint service configuration // does not fulfill the requirements for an external key store proxy. For details, -// see the exception message and review the requirements (kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) +// see the exception message and review the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) // for Amazon VPC endpoint service connectivity for an external key store. // // - XksProxyInvalidResponseException @@ -8489,6 +8635,9 @@ func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req // // - DescribeKey // +// Eventual consistency: The KMS API follows an eventual consistency model. 
+// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8667,6 +8816,9 @@ func (c *KMS) UpdatePrimaryRegionRequest(input *UpdatePrimaryRegionInput) (req * // // - ReplicateKey // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8823,6 +8975,9 @@ func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *V // // Related operations: Sign // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8988,6 +9143,9 @@ func (c *KMS) VerifyMacRequest(input *VerifyMacInput) (req *request.Request, out // // Related operations: GenerateMac // +// Eventual consistency: The KMS API follows an eventual consistency model. +// For more information, see KMS eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/programming-eventual-consistency.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -10454,8 +10612,8 @@ type CreateKeyInput struct { // in the Key Management Service Developer Guide. // // Use this parameter only when you intend to prevent the principal that is - // making the request from making a subsequent PutKeyPolicy request on the KMS - // key. + // making the request from making a subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) + // request on the KMS key. BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` // Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). @@ -11459,7 +11617,7 @@ type DecryptInput struct { // To get the alias name and alias ARN, use ListAliases. KeyId *string `min:"1" type:"string"` - // A signed attestation document (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitro-enclave-how.html#term-attestdoc) + // A signed attestation document (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-concepts.html#term-attestdoc) // from an Amazon Web Services Nitro enclave and the encryption algorithm to // use with the enclave's public key. The only valid encryption algorithm is // RSAES_OAEP_SHA_256. @@ -14537,8 +14695,8 @@ type GetParametersForImportInput struct { // algorithm with the RSA_2048 wrapping key spec to wrap ECC_NIST_P521 key // material. // - // * RSAES_PKCS1_V1_5 (Deprecated) — Supported only for symmetric encryption - // key material (and only in legacy mode). 
+ // * RSAES_PKCS1_V1_5 (Deprecated) — As of October 10, 2023, KMS does not + // support the RSAES_PKCS1_V1_5 wrapping algorithm. // // WrappingAlgorithm is a required field WrappingAlgorithm *string `type:"string" required:"true" enum:"AlgorithmSpec"` @@ -17791,8 +17949,8 @@ type PutKeyPolicyInput struct { // in the Key Management Service Developer Guide. // // Use this parameter only when you intend to prevent the principal that is - // making the request from making a subsequent PutKeyPolicy request on the KMS - // key. + // making the request from making a subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) + // request on the KMS key. BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` // Sets the key policy on the specified KMS key. @@ -18330,8 +18488,8 @@ type ReplicateKeyInput struct { // in the Key Management Service Developer Guide. // // Use this parameter only when you intend to prevent the principal that is - // making the request from making a subsequent PutKeyPolicy request on the KMS - // key. + // making the request from making a subsequent PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html) + // request on the KMS key. BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` // A description of the KMS key. The default value is an empty string (no description). @@ -21550,7 +21708,7 @@ func (s *XksProxyVpcEndpointServiceInUseException) RequestID() string { // The request was rejected because the Amazon VPC endpoint service configuration // does not fulfill the requirements for an external key store proxy. For details, -// see the exception message and review the requirements (kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) +// see the exception message and review the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) // for Amazon VPC endpoint service connectivity for an external key store. type XksProxyVpcEndpointServiceInvalidConfigurationException struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go index 5810320c11..2149d82fae 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go @@ -466,7 +466,7 @@ const ( // // The request was rejected because the Amazon VPC endpoint service configuration // does not fulfill the requirements for an external key store proxy. For details, - // see the exception message and review the requirements (kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) + // see the exception message and review the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/vpc-connectivity.html#xks-vpc-requirements) // for Amazon VPC endpoint service connectivity for an external key store. ErrCodeXksProxyVpcEndpointServiceInvalidConfigurationException = "XksProxyVpcEndpointServiceInvalidConfigurationException" diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go index c7e98c2c3d..af72699ef9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -18342,11 +18342,6 @@ type ResourceRecordSet struct { // You can't use the * wildcard for resource records sets that have a type // of NS. 
// - // You can use the * wildcard as the leftmost label in a domain name, for example, - // *.example.com. You can't use an * for one of the middle labels, for example, - // marketing.*.example.com. In addition, the * must replace the entire label; - // for example, you can't specify prod*.example.com. - // // Name is a required field Name *string `type:"string" required:"true"` @@ -20470,6 +20465,9 @@ const ( // CloudWatchRegionIlCentral1 is a CloudWatchRegion enum value CloudWatchRegionIlCentral1 = "il-central-1" + + // CloudWatchRegionCaWest1 is a CloudWatchRegion enum value + CloudWatchRegionCaWest1 = "ca-west-1" ) // CloudWatchRegion_Values returns all elements of the CloudWatchRegion enum @@ -20510,6 +20508,7 @@ func CloudWatchRegion_Values() []string { CloudWatchRegionUsIsobEast1, CloudWatchRegionApSoutheast4, CloudWatchRegionIlCentral1, + CloudWatchRegionCaWest1, } } @@ -20855,6 +20854,9 @@ const ( // ResourceRecordSetRegionIlCentral1 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionIlCentral1 = "il-central-1" + + // ResourceRecordSetRegionCaWest1 is a ResourceRecordSetRegion enum value + ResourceRecordSetRegionCaWest1 = "ca-west-1" ) // ResourceRecordSetRegion_Values returns all elements of the ResourceRecordSetRegion enum @@ -20890,6 +20892,7 @@ func ResourceRecordSetRegion_Values() []string { ResourceRecordSetRegionEuSouth2, ResourceRecordSetRegionApSoutheast4, ResourceRecordSetRegionIlCentral1, + ResourceRecordSetRegionCaWest1, } } @@ -21051,6 +21054,9 @@ const ( // VPCRegionIlCentral1 is a VPCRegion enum value VPCRegionIlCentral1 = "il-central-1" + + // VPCRegionCaWest1 is a VPCRegion enum value + VPCRegionCaWest1 = "ca-west-1" ) // VPCRegion_Values returns all elements of the VPCRegion enum @@ -21090,5 +21096,6 @@ func VPCRegion_Values() []string { VPCRegionEuSouth2, VPCRegionApSoutheast4, VPCRegionIlCentral1, + VPCRegionCaWest1, } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go index de1e86dca2..8c1ff8d250 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go @@ -9,22 +9,22 @@ import ( const ( // ErrCodeBatchEntryIdsNotDistinct for service response error code - // "BatchEntryIdsNotDistinct". + // "AWS.SimpleQueueService.BatchEntryIdsNotDistinct". // // Two or more batch entries in the request have the same Id. - ErrCodeBatchEntryIdsNotDistinct = "BatchEntryIdsNotDistinct" + ErrCodeBatchEntryIdsNotDistinct = "AWS.SimpleQueueService.BatchEntryIdsNotDistinct" // ErrCodeBatchRequestTooLong for service response error code - // "BatchRequestTooLong". + // "AWS.SimpleQueueService.BatchRequestTooLong". // // The length of all the messages put together is more than the limit. - ErrCodeBatchRequestTooLong = "BatchRequestTooLong" + ErrCodeBatchRequestTooLong = "AWS.SimpleQueueService.BatchRequestTooLong" // ErrCodeEmptyBatchRequest for service response error code - // "EmptyBatchRequest". + // "AWS.SimpleQueueService.EmptyBatchRequest". // // The batch request doesn't contain any entries. - ErrCodeEmptyBatchRequest = "EmptyBatchRequest" + ErrCodeEmptyBatchRequest = "AWS.SimpleQueueService.EmptyBatchRequest" // ErrCodeInvalidAddress for service response error code // "InvalidAddress". @@ -45,10 +45,10 @@ const ( ErrCodeInvalidAttributeValue = "InvalidAttributeValue" // ErrCodeInvalidBatchEntryId for service response error code - // "InvalidBatchEntryId". + // "AWS.SimpleQueueService.InvalidBatchEntryId". 
// // The Id of a batch entry in a batch request doesn't abide by the specification. - ErrCodeInvalidBatchEntryId = "InvalidBatchEntryId" + ErrCodeInvalidBatchEntryId = "AWS.SimpleQueueService.InvalidBatchEntryId" // ErrCodeInvalidIdFormat for service response error code // "InvalidIdFormat". @@ -119,10 +119,10 @@ const ( ErrCodeKmsThrottled = "KmsThrottled" // ErrCodeMessageNotInflight for service response error code - // "MessageNotInflight". + // "AWS.SimpleQueueService.MessageNotInflight". // // The specified message isn't in flight. - ErrCodeMessageNotInflight = "MessageNotInflight" + ErrCodeMessageNotInflight = "AWS.SimpleQueueService.MessageNotInflight" // ErrCodeOverLimit for service response error code // "OverLimit". @@ -134,33 +134,33 @@ const ( ErrCodeOverLimit = "OverLimit" // ErrCodePurgeQueueInProgress for service response error code - // "PurgeQueueInProgress". + // "AWS.SimpleQueueService.PurgeQueueInProgress". // // Indicates that the specified queue previously received a PurgeQueue request // within the last 60 seconds (the time it can take to delete the messages in // the queue). - ErrCodePurgeQueueInProgress = "PurgeQueueInProgress" + ErrCodePurgeQueueInProgress = "AWS.SimpleQueueService.PurgeQueueInProgress" // ErrCodeQueueDeletedRecently for service response error code - // "QueueDeletedRecently". + // "AWS.SimpleQueueService.QueueDeletedRecently". // // You must wait 60 seconds after deleting a queue before you can create another // queue with the same name. - ErrCodeQueueDeletedRecently = "QueueDeletedRecently" + ErrCodeQueueDeletedRecently = "AWS.SimpleQueueService.QueueDeletedRecently" // ErrCodeQueueDoesNotExist for service response error code - // "QueueDoesNotExist". + // "AWS.SimpleQueueService.NonExistentQueue". // // The specified queue doesn't exist. - ErrCodeQueueDoesNotExist = "QueueDoesNotExist" + ErrCodeQueueDoesNotExist = "AWS.SimpleQueueService.NonExistentQueue" // ErrCodeQueueNameExists for service response error code - // "QueueNameExists". + // "QueueAlreadyExists". // // A queue with this name already exists. Amazon SQS returns this error only // if the request includes attributes whose values differ from those of the // existing queue. - ErrCodeQueueNameExists = "QueueNameExists" + ErrCodeQueueNameExists = "QueueAlreadyExists" // ErrCodeReceiptHandleIsInvalid for service response error code // "ReceiptHandleIsInvalid". @@ -193,16 +193,16 @@ const ( ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeTooManyEntriesInBatchRequest for service response error code - // "TooManyEntriesInBatchRequest". + // "AWS.SimpleQueueService.TooManyEntriesInBatchRequest". // // The batch request contains more entries than permissible. - ErrCodeTooManyEntriesInBatchRequest = "TooManyEntriesInBatchRequest" + ErrCodeTooManyEntriesInBatchRequest = "AWS.SimpleQueueService.TooManyEntriesInBatchRequest" // ErrCodeUnsupportedOperation for service response error code - // "UnsupportedOperation". + // "AWS.SimpleQueueService.UnsupportedOperation". // // Error code 400. Unsupported operation. 
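Several SQS error-code constants in this file change value (for example, QueueDoesNotExist becomes "AWS.SimpleQueueService.NonExistentQueue"). Callers that compare against the sqs.ErrCode* constants are unaffected, while hard-coded string literals would stop matching. A hedged sketch, with a placeholder queue name:

```go
// Sketch: match SQS error codes via the sqs.ErrCode* constants rather than
// literal strings, so the renamed code values keep matching after this change.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	svc := sqs.New(session.Must(session.NewSession()))

	_, err := svc.GetQueueUrl(&sqs.GetQueueUrlInput{
		QueueName: aws.String("example-queue"), // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok {
		if aerr.Code() == sqs.ErrCodeQueueDoesNotExist {
			fmt.Println("queue does not exist:", aerr.Message())
			return
		}
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("queue exists")
}
```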
- ErrCodeUnsupportedOperation = "UnsupportedOperation" + ErrCodeUnsupportedOperation = "AWS.SimpleQueueService.UnsupportedOperation" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSES b/vendor/github.com/cert-manager/cert-manager/LICENSES index b508fb2e3b..22764fe9de 100644 --- a/vendor/github.com/cert-manager/cert-manager/LICENSES +++ b/vendor/github.com/cert-manager/cert-manager/LICENSES @@ -35,13 +35,13 @@ github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.102.1/ github.com/emicklei/go-restful/v3,https://github.com/emicklei/go-restful/blob/v3.11.0/LICENSE,MIT github.com/evanphx/json-patch,https://github.com/evanphx/json-patch/blob/v5.6.0/LICENSE,BSD-3-Clause github.com/evanphx/json-patch/v5,https://github.com/evanphx/json-patch/blob/v5.6.0/v5/LICENSE,BSD-3-Clause -github.com/felixge/httpsnoop,https://github.com/felixge/httpsnoop/blob/v1.0.3/LICENSE.txt,MIT +github.com/felixge/httpsnoop,https://github.com/felixge/httpsnoop/blob/v1.0.4/LICENSE.txt,MIT github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.6.0/LICENSE,BSD-3-Clause github.com/go-asn1-ber/asn1-ber,https://github.com/go-asn1-ber/asn1-ber/blob/v1.5.4/LICENSE,MIT -github.com/go-jose/go-jose/v3,https://github.com/go-jose/go-jose/blob/v3.0.0/LICENSE,Apache-2.0 -github.com/go-jose/go-jose/v3/json,https://github.com/go-jose/go-jose/blob/v3.0.0/json/LICENSE,BSD-3-Clause +github.com/go-jose/go-jose/v3,https://github.com/go-jose/go-jose/blob/v3.0.1/LICENSE,Apache-2.0 +github.com/go-jose/go-jose/v3/json,https://github.com/go-jose/go-jose/blob/v3.0.1/json/LICENSE,BSD-3-Clause github.com/go-ldap/ldap/v3,https://github.com/go-ldap/ldap/blob/v3.4.5/v3/LICENSE,MIT -github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.2.4/LICENSE,Apache-2.0 +github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.3.0/LICENSE,Apache-2.0 github.com/go-logr/stdr,https://github.com/go-logr/stdr/blob/v1.2.2/LICENSE,Apache-2.0 github.com/go-logr/zapr,https://github.com/go-logr/zapr/blob/v1.2.4/LICENSE,Apache-2.0 github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/v0.19.6/LICENSE,Apache-2.0 @@ -55,7 +55,7 @@ github.com/golang/snappy,https://github.com/golang/snappy/blob/v0.0.4/LICENSE,BS github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.16.0/LICENSE,Apache-2.0 github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.16.0/LICENSE,BSD-3-Clause github.com/google/gnostic-models,https://github.com/google/gnostic-models/blob/v0.6.8/LICENSE,Apache-2.0 -github.com/google/go-cmp/cmp,https://github.com/google/go-cmp/blob/v0.5.9/LICENSE,BSD-3-Clause +github.com/google/go-cmp/cmp,https://github.com/google/go-cmp/blob/v0.6.0/LICENSE,BSD-3-Clause github.com/google/go-querystring/query,https://github.com/google/go-querystring/blob/v1.1.0/LICENSE,BSD-3-Clause github.com/google/gofuzz,https://github.com/google/gofuzz/blob/v1.2.0/LICENSE,Apache-2.0 github.com/google/s2a-go,https://github.com/google/s2a-go/blob/v0.1.7/LICENSE.md,Apache-2.0 @@ -109,14 +109,14 @@ go.etcd.io/etcd/api/v3,https://github.com/etcd-io/etcd/blob/api/v3.5.9/api/LICEN go.etcd.io/etcd/client/pkg/v3,https://github.com/etcd-io/etcd/blob/client/pkg/v3.5.9/client/pkg/LICENSE,Apache-2.0 go.etcd.io/etcd/client/v3,https://github.com/etcd-io/etcd/blob/client/v3.5.9/client/v3/LICENSE,Apache-2.0 go.opencensus.io,https://github.com/census-instrumentation/opencensus-go/blob/v0.24.0/LICENSE,Apache-2.0 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/google.golang.org/grpc/otelgrpc/v0.45.0/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE,Apache-2.0 -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/net/http/otelhttp/v0.44.0/instrumentation/net/http/otelhttp/LICENSE,Apache-2.0 -go.opentelemetry.io/otel,https://github.com/open-telemetry/opentelemetry-go/blob/v1.19.0/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/exporters/otlp/otlptrace,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/v1.19.0/exporters/otlp/otlptrace/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/otlptracegrpc/v1.19.0/exporters/otlp/otlptrace/otlptracegrpc/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/metric,https://github.com/open-telemetry/opentelemetry-go/blob/metric/v1.19.0/metric/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/sdk,https://github.com/open-telemetry/opentelemetry-go/blob/sdk/v1.19.0/sdk/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/trace,https://github.com/open-telemetry/opentelemetry-go/blob/trace/v1.19.0/trace/LICENSE,Apache-2.0 +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/google.golang.org/grpc/otelgrpc/v0.46.0/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE,Apache-2.0 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/net/http/otelhttp/v0.46.0/instrumentation/net/http/otelhttp/LICENSE,Apache-2.0 +go.opentelemetry.io/otel,https://github.com/open-telemetry/opentelemetry-go/blob/v1.20.0/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/v1.20.0/exporters/otlp/otlptrace/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/otlptracegrpc/v1.20.0/exporters/otlp/otlptrace/otlptracegrpc/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/metric,https://github.com/open-telemetry/opentelemetry-go/blob/metric/v1.20.0/metric/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/sdk,https://github.com/open-telemetry/opentelemetry-go/blob/sdk/v1.20.0/sdk/LICENSE,Apache-2.0 +go.opentelemetry.io/otel/trace,https://github.com/open-telemetry/opentelemetry-go/blob/trace/v1.20.0/trace/LICENSE,Apache-2.0 go.opentelemetry.io/proto/otlp,https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.0.0/otlp/LICENSE,Apache-2.0 go.uber.org/multierr,https://github.com/uber-go/multierr/blob/v1.11.0/LICENSE.txt,MIT go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.25.0/LICENSE.txt,MIT @@ -125,16 +125,16 @@ golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/92128663:LICENSE,BSD-3- golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.17.0:LICENSE,BSD-3-Clause golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.12.0:LICENSE,BSD-3-Clause golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.3.0:LICENSE,BSD-3-Clause -golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.13.0:LICENSE,BSD-3-Clause 
+golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.14.0:LICENSE,BSD-3-Clause golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.13.0:LICENSE,BSD-3-Clause golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.13.0:LICENSE,BSD-3-Clause golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.3.0:LICENSE,BSD-3-Clause gomodules.xyz/jsonpatch/v2,https://github.com/gomodules/jsonpatch/blob/v2.4.0/v2/LICENSE,Apache-2.0 google.golang.org/api,https://github.com/googleapis/google-api-go-client/blob/v0.140.0/LICENSE,BSD-3-Clause google.golang.org/api/internal/third_party/uritemplates,https://github.com/googleapis/google-api-go-client/blob/v0.140.0/internal/third_party/uritemplates/LICENSE,BSD-3-Clause -google.golang.org/genproto/googleapis/api,https://github.com/googleapis/go-genproto/blob/f966b187b2e5/googleapis/api/LICENSE,Apache-2.0 +google.golang.org/genproto/googleapis/api,https://github.com/googleapis/go-genproto/blob/b8732ec3820d/googleapis/api/LICENSE,Apache-2.0 google.golang.org/genproto/googleapis/rpc,https://github.com/googleapis/go-genproto/blob/2d3300fd4832/googleapis/rpc/LICENSE,Apache-2.0 -google.golang.org/grpc,https://github.com/grpc/grpc-go/blob/v1.58.3/LICENSE,Apache-2.0 +google.golang.org/grpc,https://github.com/grpc/grpc-go/blob/v1.59.0/LICENSE,Apache-2.0 google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.31.0/LICENSE,BSD-3-Clause gopkg.in/inf.v0,https://github.com/go-inf/inf/blob/v0.9.1/LICENSE,BSD-3-Clause gopkg.in/ini.v1,https://github.com/go-ini/ini/blob/v1.62.0/LICENSE,Apache-2.0 diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index a8c29bfbd5..8969526a6e 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -91,11 +91,12 @@ logr design but also left out some parts and changed others: | Adding a name to a logger | `WithName` | no API | | Modify verbosity of log entries in a call chain | `V` | no API | | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | The high-level slog API is explicitly meant to be one of many different APIs that can be layered on top of a shared `slog.Handler`. logr is one such -alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) -package. +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. ### Inspiration @@ -145,24 +146,24 @@ There are implementations for the following logging libraries: ## slog interoperability Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` -and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and -`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and +`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level -slog API. `slogr` itself leaves that to the caller. +slog API. -## Using a `logr.Sink` as backend for slog +### Using a `logr.LogSink` as backend for slog Ideally, a logr sink implementation should support both logr and slog by -implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +implementing both the normal logr interface(s) and `SlogSink`. 
Because of a conflict in the parameters of the common `Enabled` method, it is [not possible to implement both slog.Handler and logr.Sink in the same type](https://github.com/golang/go/issues/59110). If both are supported, log calls can go from the high-level APIs to the backend -without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can convert back and forth without adding additional wrappers, with one exception: when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then -`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future log calls. Such an implementation should also support values that implement specific @@ -187,13 +188,13 @@ Not supporting slog has several drawbacks: These drawbacks are severe enough that applications using a mixture of slog and logr should switch to a different backend. -## Using a `slog.Handler` as backend for logr +### Using a `slog.Handler` as backend for logr Using a plain `slog.Handler` without support for logr works better than the other direction: - All logr verbosity levels can be mapped 1:1 to their corresponding slog level by negating them. -- Stack unwinding is done by the `slogr.SlogSink` and the resulting program +- Stack unwinding is done by the `SlogSink` and the resulting program counter is passed to the `slog.Handler`. - Names added via `Logger.WithName` are gathered and recorded in an additional attribute with `logger` as key and the names separated by slash as value. @@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility with logr implementations without slog support is not important, then `slog.Valuer` is sufficient. -## Context support for slog +### Context support for slog Storing a logger in a `context.Context` is not supported by -slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this -to fill this gap: +slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be +used to fill this gap. They store and retrieve a `slog.Logger` pointer +under the same context key that is also used by `NewContext` and +`FromContext` for `logr.Logger` value. - func HandlerFromContext(ctx context.Context) slog.Handler { - logger, err := logr.FromContext(ctx) - if err == nil { - return slogr.NewSlogHandler(logger) - } - return slog.Default().Handler() - } +When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will +automatically convert the `slog.Logger` to a +`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction. - func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { - return logr.NewContext(ctx, slogr.NewLogr(handler)) - } +With this approach, binaries which use either slog or logr are as efficient as +possible with no unnecessary allocations. This is also why the API stores a +`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` +on retrieval would need to allocate one. -The downside is that storing and retrieving a `slog.Handler` needs more -allocations compared to using a `logr.Logger`. Therefore the recommendation is -to use the `logr.Logger` API in code which uses contextual logging. +The downside is that switching back and forth needs more allocations. 
Because +logr is the API that is already in use by different packages, in particular +Kubernetes, the recommendation is to use the `logr.Logger` API in code which +uses contextual logging. + +An alternative to adding values to a logger and storing that logger in the +context is to store the values in the context and to configure a logging +backend to extract those values when emitting log entries. This only works when +log calls are passed the context, which is not supported by the logr API. + +With the slog API, it is possible, but not +required. https://github.com/veqryn/slog-context is a package for slog which +provides additional support code for this approach. It also contains wrappers +for the context functions in logr, so developers who prefer to not use the logr +APIs directly can use those instead and the resulting code will still be +interoperable with logr. ## FAQ diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 0000000000..de8bcc3ad8 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 0000000000..f012f9a18e --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. 
+func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 0000000000..065ef0b828 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. +func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 12e5807cc5..fb2f866f4b 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -100,6 +100,11 @@ type Options struct { // details, see docs for Go's time.Layout. TimestampFormat string + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. 
+ LogInfoLevel *string + // Verbosity tells funcr which V logs to produce. Higher values enable // more logs. Info logs at or below this level will be written, while logs // above this level will be discarded. @@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { if opts.MaxLogDepth == 0 { opts.MaxLogDepth = defaultMaxLogDepth } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } f := Formatter{ outputFormat: outfmt, prefix: "", @@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - depth int - opts *Options + outputFormat outputFormat + prefix string + values []any + valuesStr string + parentValuesStr string + depth int + opts *Options + group string // for slog groups + groupDepth int } // outputFormat indicates which outputFormat to use. @@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { - buf.WriteByte('{') + buf.WriteByte('{') // for the whole line } + vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, false, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if len(f.valuesStr) > 0 { + + if f.parentValuesStr != "" { if continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - buf.WriteByte(' ') - } + buf.WriteByte(f.comma()) } + buf.WriteString(f.parentValuesStr) continuing = true - buf.WriteString(f.valuesStr) } + + groupDepth := f.groupDepth + if f.group != "" { + if f.valuesStr != "" || len(args) != 0 { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } else { + // The group was empty + groupDepth-- + } + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.valuesStr) + continuing = true + } + vals = args if hook := f.opts.RenderArgsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, continuing, true) // escape user-provided keys - if f.outputFormat == outputJSON { - buf.WriteByte('}') + + for i := 0; i < groupDepth; i++ { + buf.WriteByte('}') // for the groups } + + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole line + } + return buf.String() } @@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } + copied := false for i := 0; i < len(kvList); i += 2 { k, ok := kvList[i].(string) if !ok { + if !copied { + newList := make([]any, len(kvList)) + copy(newList, kvList) + kvList = newList + copied = true + } k = f.nonStringKey(kvList[i]) kvList[i] = k } @@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc if i > 0 || continuing { if f.outputFormat == outputJSON { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } else { // In theory the format could be something we don't understand. In // practice, we control it, so it won't be. 
@@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } } - if escapeKeys { - buf.WriteString(prettyString(k)) - } else { - // this is faster - buf.WriteByte('"') - buf.WriteString(k) - buf.WriteByte('"') - } - if f.outputFormat == outputJSON { - buf.WriteByte(':') - } else { - buf.WriteByte('=') - } + buf.WriteString(f.quoted(k, escapeKeys)) + buf.WriteByte(f.colon()) buf.WriteString(f.pretty(v)) } return kvList } +func (f Formatter) quoted(str string, escape bool) string { + if escape { + return prettyString(str) + } + // this is faster + return `"` + str + `"` +} + +func (f Formatter) comma() byte { + if f.outputFormat == outputJSON { + return ',' + } + return ' ' +} + +func (f Formatter) colon() byte { + if f.outputFormat == outputJSON { + return ':' + } + return '=' +} + func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } for i := 0; i < len(v); i += 2 { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } k, _ := v[i].(string) // sanitize() above means no need to check success // arbitrary keys might need escaping buf.WriteString(prettyString(k)) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { continue } if printComma { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { @@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { name = fld.Name } // field names can't contain characters which need escaping - buf.WriteByte('"') - buf.WriteString(name) - buf.WriteByte('"') - buf.WriteByte(':') + buf.WriteString(f.quoted(name, false)) + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } e := v.Index(i) buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) @@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { i := 0 for it.Next() { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } // If a map key supports TextMarshaler, use it. keystr := "" @@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } } buf.WriteString(keystr) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) i++ } @@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any { return kvList } +// startGroup opens a new group scope (basically a sub-struct), which locks all +// the current saved values and starts them anew. This is needed to satisfy +// slog. +func (f *Formatter) startGroup(group string) { + // Unnamed groups are just inlined. + if group == "" { + return + } + + // Any saved values can no longer be changed. 
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) + continuing := false + + if f.parentValuesStr != "" { + buf.WriteString(f.parentValuesStr) + continuing = true + } + + if f.group != "" && f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.valuesStr) + } + + // NOTE: We don't close the scope here - that's done later, when a log line + // is actually rendered (because we have N scopes to close). + + f.parentValuesStr = buf.String() + + // Start collecting new values. + f.group = group + f.groupDepth++ + f.valuesStr = "" + f.values = nil +} + // Init configures this Formatter from runtime info, such as the call depth // imposed by logr itself. // Note that this receiver is a pointer, so depth can be saved. @@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args if policy := f.opts.LogCaller; policy == All || policy == Info { args = append(args, "caller", f.caller()) } - args = append(args, "level", level, "msg", msg) + if key := *f.opts.LogInfoLevel; key != "" { + args = append(args, key, level) + } + args = append(args, "msg", msg) return prefix, f.render(args, kvList) } diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go new file mode 100644 index 0000000000..7bd84761e2 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go @@ -0,0 +1,105 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package funcr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +var _ logr.SlogSink = &fnlogger{} + +const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink + +func (l fnlogger) Handle(_ context.Context, record slog.Record) error { + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, kvList) + return true + }) + + if record.Level >= slog.LevelError { + l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...) + } + return nil +} + +func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, kvList) + } + l.AddValues(kvList) + return &l +} + +func (l fnlogger) WithGroup(name string) logr.SlogSink { + l.startGroup(name) + return &l +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. 
+func attrToKVs(attr slog.Attr, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, grpKVs) + } + if attr.Key == "" { + // slog says we have to inline these + kvList = append(kvList, grpKVs...) + } else { + kvList = append(kvList, attr.Key, PseudoStruct(grpKVs)) + } + } else if attr.Key != "" { + kvList = append(kvList, attr.Key, attrVal.Any()) + } + + return kvList +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l fnlogger) levelFromSlog(level slog.Level) int { + result := -level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 2a5075a180..b4428e105b 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -207,10 +207,6 @@ limitations under the License. // those. package logr -import ( - "context" -) - // New returns a new Logger instance. This is primarily used by libraries // implementing LogSink, rather than end users. Passing a nil sink will create // a Logger which discards all log lines. @@ -410,45 +406,6 @@ func (l Logger) IsZero() bool { return l.sink == nil } -// contextKey is how we find Loggers in a context.Context. -type contextKey struct{} - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// notFoundError exists to carry an IsNotFound method. -type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - // RuntimeInfo holds information that the logr "core" library knows which // LogSinks might want to know. 
type RuntimeInfo struct { diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go similarity index 63% rename from vendor/github.com/go-logr/logr/slogr/sloghandler.go rename to vendor/github.com/go-logr/logr/sloghandler.go index ec6725ce2c..82d1ba4948 100644 --- a/vendor/github.com/go-logr/logr/slogr/sloghandler.go +++ b/vendor/github.com/go-logr/logr/sloghandler.go @@ -17,18 +17,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package slogr +package logr import ( "context" "log/slog" - - "github.com/go-logr/logr" ) type slogHandler struct { // May be nil, in which case all logs get discarded. - sink logr.LogSink + sink LogSink // Non-nil if sink is non-nil and implements SlogSink. slogSink SlogSink @@ -54,7 +52,7 @@ func (l *slogHandler) GetLevel() slog.Level { return l.levelBias } -func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { +func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) } @@ -72,9 +70,7 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { kvList := make([]any, 0, 2*record.NumAttrs()) record.Attrs(func(attr slog.Attr) bool { - if attr.Key != "" { - kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) - } + kvList = attrToKVs(attr, l.groupPrefix, kvList) return true }) if record.Level >= slog.LevelError { @@ -90,15 +86,15 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { // are called by Handle, code in slog gets skipped. // // This offset currently (Go 1.21.0) works for calls through -// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// slog.New(ToSlogHandler(...)). There's no guarantee that the call // chain won't change. Wrapping the handler will also break unwinding. It's // still better than not adjusting at all.... // -// This cannot be done when constructing the handler because NewLogr needs +// This cannot be done when constructing the handler because FromSlogHandler needs // access to the original sink without this adjustment. A second copy would // work, but then WithAttrs would have to be called for both of them. -func (l *slogHandler) sinkWithCallDepth() logr.LogSink { - if sink, ok := l.sink.(logr.CallDepthLogSink); ok { +func (l *slogHandler) sinkWithCallDepth() LogSink { + if sink, ok := l.sink.(CallDepthLogSink); ok { return sink.WithCallDepth(2) } return l.sink @@ -109,60 +105,88 @@ func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { return l } - copy := *l + clone := *l if l.slogSink != nil { - copy.slogSink = l.slogSink.WithAttrs(attrs) - copy.sink = copy.slogSink + clone.slogSink = l.slogSink.WithAttrs(attrs) + clone.sink = clone.slogSink } else { kvList := make([]any, 0, 2*len(attrs)) for _, attr := range attrs { - if attr.Key != "" { - kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) - } + kvList = attrToKVs(attr, l.groupPrefix, kvList) } - copy.sink = l.sink.WithValues(kvList...) + clone.sink = l.sink.WithValues(kvList...) 
} - return © + return &clone } func (l *slogHandler) WithGroup(name string) slog.Handler { if l.sink == nil { return l } - copy := *l - if l.slogSink != nil { - copy.slogSink = l.slogSink.WithGroup(name) - copy.sink = l.slogSink - } else { - copy.groupPrefix = copy.addGroupPrefix(name) + if name == "" { + // slog says to inline empty groups + return l } - return © + clone := *l + if l.slogSink != nil { + clone.slogSink = l.slogSink.WithGroup(name) + clone.sink = clone.slogSink + } else { + clone.groupPrefix = addPrefix(clone.groupPrefix, name) + } + return &clone } -func (l *slogHandler) addGroupPrefix(name string) string { - if l.groupPrefix == "" { +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. +func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + prefix := groupPrefix + if attr.Key != "" { + prefix = addPrefix(groupPrefix, attr.Key) + } + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, prefix, grpKVs) + } + kvList = append(kvList, grpKVs...) + } else if attr.Key != "" { + kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) + } + + return kvList +} + +func addPrefix(prefix, name string) string { + if prefix == "" { return name } - return l.groupPrefix + groupSeparator + name + if name == "" { + return prefix + } + return prefix + groupSeparator + name } // levelFromSlog adjusts the level by the logger's verbosity and negates it. // It ensures that the result is >= 0. This is necessary because the result is -// passed to a logr.LogSink and that API did not historically document whether +// passed to a LogSink and that API did not historically document whether // levels could be negative or what that meant. // // Some example usage: -// logrV0 := getMyLogger() -// logrV2 := logrV0.V(2) -// slogV2 := slog.New(slogr.NewSlogHandler(logrV2)) -// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) -// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) -// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) func (l *slogHandler) levelFromSlog(level slog.Level) int { result := -level - result += l.levelBias // in case the original logr.Logger had a V level + result += l.levelBias // in case the original Logger had a V level if result < 0 { - result = 0 // because logr.LogSink doesn't expect negative V levels + result = 0 // because LogSink doesn't expect negative V levels } return int(result) } diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go new file mode 100644 index 0000000000..28a83d0243 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr.go @@ -0,0 +1,100 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +// FromSlogHandler returns a Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. +func FromSlogHandler(handler slog.Handler) Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return Discard() + } + return New(handler.sink).V(int(handler.levelBias)) + } + return New(&slogSink{handler: handler}) +} + +// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the Logger: +// +// logger := +// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func ToSlogHandler(logger Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in FromSlogHandler +// and ToSlogHandler. 
+type SlogSink interface { + LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go index eb519ae23f..36432c56fd 100644 --- a/vendor/github.com/go-logr/logr/slogr/slogr.go +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -23,10 +23,11 @@ limitations under the License. // // See the README in the top-level [./logr] package for a discussion of // interoperability. +// +// Deprecated: use the main logr package instead. package slogr import ( - "context" "log/slog" "github.com/go-logr/logr" @@ -34,75 +35,27 @@ import ( // NewLogr returns a logr.Logger which writes to the slog.Handler. // -// The logr verbosity level is mapped to slog levels such that V(0) becomes -// slog.LevelInfo and V(4) becomes slog.LevelDebug. +// Deprecated: use [logr.FromSlogHandler] instead. func NewLogr(handler slog.Handler) logr.Logger { - if handler, ok := handler.(*slogHandler); ok { - if handler.sink == nil { - return logr.Discard() - } - return logr.New(handler.sink).V(int(handler.levelBias)) - } - return logr.New(&slogSink{handler: handler}) + return logr.FromSlogHandler(handler) } // NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. // -// The returned logger writes all records with level >= slog.LevelError as -// error log entries with LogSink.Error, regardless of the verbosity level of -// the logr.Logger: -// -// logger := -// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) -// -// The level of all other records gets reduced by the verbosity -// level of the logr.Logger and the result is negated. If it happens -// to be negative, then it gets replaced by zero because a LogSink -// is not expected to handled negative levels: -// -// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) -// slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +// Deprecated: use [logr.ToSlogHandler] instead. func NewSlogHandler(logger logr.Logger) slog.Handler { - if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { - return sink.handler - } + return logr.ToSlogHandler(logger) +} - handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} - if slogSink, ok := handler.sink.(SlogSink); ok { - handler.slogSink = slogSink - } - return handler +// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// Deprecated: use [logr.ToSlogHandler] instead. +func ToSlogHandler(logger logr.Logger) slog.Handler { + return logr.ToSlogHandler(logger) } // SlogSink is an optional interface that a LogSink can implement to support -// logging through the slog.Logger or slog.Handler APIs better. It then should -// also support special slog values like slog.Group. When used as a -// slog.Handler, the advantages are: +// logging through the slog.Logger or slog.Handler APIs better. 
// -// - stack unwinding gets avoided in favor of logging the pre-recorded PC, -// as intended by slog -// - proper grouping of key/value pairs via WithGroup -// - verbosity levels > slog.LevelInfo can be recorded -// - less overhead -// -// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally -// well. Developers can pick whatever API suits them better and/or mix -// packages which use either API in the same binary with a common logging -// implementation. -// -// This interface is necessary because the type implementing the LogSink -// interface cannot also implement the slog.Handler interface due to the -// different prototype of the common Enabled method. -// -// An implementation could support both interfaces in two different types, but then -// additional interfaces would be needed to convert between those types in NewLogr -// and NewSlogHandler. -type SlogSink interface { - logr.LogSink - - Handle(ctx context.Context, record slog.Record) error - WithAttrs(attrs []slog.Attr) SlogSink - WithGroup(name string) SlogSink -} +// Deprecated: use [logr.SlogSink] instead. +type SlogSink = logr.SlogSink diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go similarity index 82% rename from vendor/github.com/go-logr/logr/slogr/slogsink.go rename to vendor/github.com/go-logr/logr/slogsink.go index 6fbac561d9..4060fcbc2b 100644 --- a/vendor/github.com/go-logr/logr/slogr/slogsink.go +++ b/vendor/github.com/go-logr/logr/slogsink.go @@ -17,24 +17,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package slogr +package logr import ( "context" "log/slog" "runtime" "time" - - "github.com/go-logr/logr" ) var ( - _ logr.LogSink = &slogSink{} - _ logr.CallDepthLogSink = &slogSink{} - _ Underlier = &slogSink{} + _ LogSink = &slogSink{} + _ CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} ) -// Underlier is implemented by the LogSink returned by NewLogr. +// Underlier is implemented by the LogSink returned by NewFromLogHandler. type Underlier interface { // GetUnderlying returns the Handler used by the LogSink. GetUnderlying() slog.Handler @@ -54,7 +52,7 @@ type slogSink struct { handler slog.Handler } -func (l *slogSink) Init(info logr.RuntimeInfo) { +func (l *slogSink) Init(info RuntimeInfo) { l.callDepth = info.CallDepth } @@ -62,7 +60,7 @@ func (l *slogSink) GetUnderlying() slog.Handler { return l.handler } -func (l *slogSink) WithCallDepth(depth int) logr.LogSink { +func (l *slogSink) WithCallDepth(depth int) LogSink { newLogger := *l newLogger.callDepth += depth return &newLogger @@ -93,18 +91,18 @@ func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interf record.AddAttrs(slog.Any(errKey, err)) } record.Add(kvList...) 
- l.handler.Handle(context.Background(), record) + _ = l.handler.Handle(context.Background(), record) } -func (l slogSink) WithName(name string) logr.LogSink { +func (l slogSink) WithName(name string) LogSink { if l.name != "" { - l.name = l.name + "/" + l.name += "/" } l.name += name return &l } -func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { +func (l slogSink) WithValues(kvList ...interface{}) LogSink { l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) return &l } diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 7ed347d3ad..c9fb829dc6 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + ## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cdc8..c351129279 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index dc75f7d990..5232b48678 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. 
+func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 0000000000..339a959a7a --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. 
+func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 0000000000..ba9dd5eb68 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,75 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t := timeNow().UnixMilli() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (uuid[6] & 0x0F) + // uuid[8] has already has right version +} diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go index 7853580601..570066ac04 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/client.go @@ -286,8 +286,8 @@ func (c *Client) Do(r *http.Request, v interface{}) (*Response, error) { return response, fmt.Errorf("hcloud: error reading response meta data: %s", err) } - if resp.StatusCode >= 400 && resp.StatusCode <= 599 { - err = errorFromResponse(resp, body) + if response.StatusCode >= 400 && response.StatusCode <= 599 { + err = errorFromResponse(response, body) if err == nil { err = fmt.Errorf("hcloud: server responded with status code %d", resp.StatusCode) } else if IsError(err, ErrorCodeConflict) { @@ -359,7 +359,7 @@ func dumpRequest(r *http.Request) ([]byte, error) { return dumpReq, nil } -func errorFromResponse(resp *http.Response, body []byte) error { +func errorFromResponse(resp *Response, body []byte) error { if !strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { return nil } @@ -371,7 +371,10 @@ func errorFromResponse(resp *http.Response, body []byte) error { if respBody.Error.Code == "" && respBody.Error.Message == "" { return nil } - return ErrorFromSchema(respBody.Error) + + hcErr := ErrorFromSchema(respBody.Error) + hcErr.response = resp + return hcErr } // Response represents a response from the API. It embeds http.Response. diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go index ac689d1104..653043e6da 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/error.go @@ -94,12 +94,19 @@ type Error struct { Code ErrorCode Message string Details interface{} + + response *Response } func (e Error) Error() string { return fmt.Sprintf("%s (%s)", e.Message, e.Code) } +// Response returns the [Response] that contained the error if available. +func (e Error) Response() *Response { + return e.response +} + // ErrorDetailsInvalidInput contains the details of an 'invalid_input' error. 
type ErrorDetailsInvalidInput struct { Fields []ErrorDetailsInvalidInputField diff --git a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go b/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go index 682b8eaa3d..b4ce367a16 100644 --- a/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go +++ b/vendor/github.com/hetznercloud/hcloud-go/hcloud/hcloud.go @@ -2,4 +2,4 @@ package hcloud // Version is the library's version following Semantic Versioning. -const Version = "1.52.0" // x-release-please-version +const Version = "1.53.0" // x-release-please-version diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go similarity index 83% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go index 258c0636aa..7c08e564f1 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go @@ -19,9 +19,10 @@ import ( "errors" "io" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) +// TODO: Give error package name prefix in next minor release. var errInvalidVarint = errors.New("invalid varint32 encountered") // ReadDelimited decodes a message from the provided length-delimited stream, @@ -36,6 +37,12 @@ var errInvalidVarint = errors.New("invalid varint32 encountered") // of the stream has been reached in doing so. In that case, any subsequent // calls return (0, io.EOF). func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // TODO: Consider allowing the caller to specify a decode buffer in the + // next major version. + + // TODO: Consider using error wrapping to annotate error state in pass- + // through cases in the next minor version. + // Per AbstractParser#parsePartialDelimitedFrom with // CodedInputStream#readRawVarint32. 
var headerBuf [binary.MaxVarintLen32]byte @@ -53,15 +60,14 @@ func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { if err != nil { return bytesRead, err } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... + // A Reader should not return (0, nil); but if it does, it should + // be treated as no-op according to the Reader contract. continue } bytesRead += newBytesRead // Now present everything read so far to the varint decoder and // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead]) } messageBuf := make([]byte, messageLength) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go similarity index 91% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go rename to vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go index 8fb59ad226..e58dd9d297 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go @@ -18,7 +18,7 @@ import ( "encoding/binary" "io" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) // WriteDelimited encodes and dumps a message to the provided writer prefixed @@ -28,6 +28,9 @@ import ( // number of bytes written and any applicable error. This is roughly // equivalent to the companion Java API's MessageLite#writeDelimitedTo. func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + // TODO: Consider allowing the caller to specify an encode buffer in the + // next major version. + buffer, err := proto.Marshal(m) if err != nil { return 0, err diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 1feba62c6c..b5c8bcb395 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -475,6 +475,9 @@ type HistogramOpts struct { // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } // HistogramVecOpts bundles the options to create a HistogramVec metric. 
@@ -526,7 +529,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.now == nil { opts.now = time.Now } - + if opts.afterFunc == nil { + opts.afterFunc = time.AfterFunc + } h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -536,6 +541,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, lastResetTime: opts.now(), now: opts.now, + afterFunc: opts.afterFunc, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -716,9 +722,16 @@ type histogram struct { nativeHistogramMinResetDuration time.Duration // lastResetTime is protected by mtx. It is also used as created timestamp. lastResetTime time.Time + // resetScheduled is protected by mtx. It is true if a reset is + // scheduled for a later time (when nativeHistogramMinResetDuration has + // passed). + resetScheduled bool // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } func (h *histogram) Desc() *Desc { @@ -874,21 +887,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { return } + // One of the other strategies will happen. To undo what they will do as + // soon as enough time has passed to satisfy + // h.nativeHistogramMinResetDuration, schedule a reset at the right time + // if we haven't done so already. + if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled { + h.resetScheduled = true + h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset) + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { return } h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration -// has been passed. It returns true if the histogram has been reset. The caller -// must have locked h.mtx. +// maybeReset resets the whole histogram if at least +// h.nativeHistogramMinResetDuration has been passed. It returns true if the +// histogram has been reset. The caller must have locked h.mtx. func (h *histogram) maybeReset( hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, ) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || + if h.nativeHistogramMinResetDuration == 0 || // No reset configured. + h.resetScheduled || // Do not interefere if a reset is already scheduled. h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } @@ -906,6 +929,29 @@ func (h *histogram) maybeReset( return true } +// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be +// called without having locked h.mtx. +func (h *histogram) reset() { + h.mtx.Lock() + defer h.mtx.Unlock() + + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hot := h.counts[hotIdx] + cold := h.counts[coldIdx] + // Completely reset coldCounts. + h.resetCounts(cold) + // Make coldCounts the new hot counts while resetting countAndHotIdx. + n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. 
+ h.resetCounts(hot) + h.lastResetTime = h.now() + h.resetScheduled = false +} + // maybeWidenZeroBucket widens the zero bucket until it includes the existing // buckets closest to the zero bucket (which could be two, if an equidistant // negative and a positive bucket exists, but usually it's only one bucket to be diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index b3c4eca2bc..c21911f292 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { + // The call below makes vals escape, copy them to avoid that. + vals := append([]string(nil), vals...) return fmt.Errorf( "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index c0152cdb61..8c1136ceea 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !js -// +build !windows,!js +//go:build !windows && !js && !wasip1 +// +build !windows,!js,!wasip1 package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go new file mode 100644 index 0000000000..d8d9a6d7a2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build wasip1 +// +build wasip1 + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (*processCollector) processCollect(chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 84946b2703..cee360db7f 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -474,6 +474,9 @@ type Histogram struct { NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). 
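For reference, a brief sketch of where the scheduled-reset behaviour comes into play when declaring a native histogram; the metric name and values are illustrative only:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// With NativeHistogramMaxBucketNumber set, bucket limiting may kick in; the
	// change above additionally schedules a full reset via afterFunc once
	// NativeHistogramMinResetDuration has elapsed since the last reset.
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "request_duration_seconds",
		Help:                            "Request latency.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.042)
}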
NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 9063978151..0ca86a3dc7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -22,7 +22,7 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/model" ) diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f611ffaad..ca21406000 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,7 +18,7 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/matttproud/golang_protobuf_extensions/v2/pbutil" "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" "google.golang.org/protobuf/encoding/prototext" diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0ce7ea4612..062a281856 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.53.3 +GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 13d74e3957..134767d69a 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build netbsd || openbsd || solaris || windows || nostatfs -// +build netbsd openbsd solaris windows nostatfs +//go:build !freebsd && !linux +// +build !freebsd,!linux package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index bee151445a..80df79c319 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs -// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs +//go:build freebsd || linux +// +build freebsd linux package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 852c8c4a0e..9d8af6db74 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -233,6 +241,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. + // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. 
- ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 4b7933e4f9..fa761b3529 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -26,6 +26,7 @@ var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -40,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go 
b/vendor/github.com/prometheus/procfs/proc_maps.go index 727549a13f..7e75c286b5 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -63,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) { // parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c055d075db..46307f5721 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -23,7 +23,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -32,6 +32,8 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of Pid namespace. + NSpids []uint64 // Peak virtual memory size. VmPeak uint64 // nolint:revive @@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt copy(s.UIDs[:], strings.Split(vString, "\t")) case "Gid": copy(s.GIDs[:], strings.Split(vString, "\t")) + case "NSpid": + s.NSpids = calcNSPidsList(vString) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 { sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) return g } + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/block_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/block_sdk.go new file mode 100644 index 0000000000..bb1873c0e1 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/block_sdk.go @@ -0,0 +1,1250 @@ +// This file was automatically generated. DO NOT EDIT. +// If you have any remark or suggestion do not hesitate to open an issue. + +// Package block provides methods and message types of the block v1alpha1 API. 
+package block + +import ( + "bytes" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/scaleway/scaleway-sdk-go/internal/errors" + "github.com/scaleway/scaleway-sdk-go/internal/marshaler" + "github.com/scaleway/scaleway-sdk-go/internal/parameter" + "github.com/scaleway/scaleway-sdk-go/namegenerator" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +// always import dependencies +var ( + _ fmt.Stringer + _ json.Unmarshaler + _ url.URL + _ net.IP + _ http.Header + _ bytes.Reader + _ time.Time + _ = strings.Join + + _ scw.ScalewayRequest + _ marshaler.Duration + _ scw.File + _ = parameter.AddToQuery + _ = namegenerator.GetRandomName +) + +type ListSnapshotsRequestOrderBy string + +const ( + // Order by creation date (ascending chronological order). + ListSnapshotsRequestOrderByCreatedAtAsc = ListSnapshotsRequestOrderBy("created_at_asc") + // Order by creation date (descending chronological order). + ListSnapshotsRequestOrderByCreatedAtDesc = ListSnapshotsRequestOrderBy("created_at_desc") + // Order by name (ascending order). + ListSnapshotsRequestOrderByNameAsc = ListSnapshotsRequestOrderBy("name_asc") + // Order by name (descending order). + ListSnapshotsRequestOrderByNameDesc = ListSnapshotsRequestOrderBy("name_desc") +) + +func (enum ListSnapshotsRequestOrderBy) String() string { + if enum == "" { + // return default value if empty + return "created_at_asc" + } + return string(enum) +} + +func (enum ListSnapshotsRequestOrderBy) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *ListSnapshotsRequestOrderBy) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = ListSnapshotsRequestOrderBy(ListSnapshotsRequestOrderBy(tmp).String()) + return nil +} + +type ListVolumesRequestOrderBy string + +const ( + // Order by creation date (ascending chronological order). + ListVolumesRequestOrderByCreatedAtAsc = ListVolumesRequestOrderBy("created_at_asc") + // Order by creation date (descending chronological order). + ListVolumesRequestOrderByCreatedAtDesc = ListVolumesRequestOrderBy("created_at_desc") + // Order by name (ascending order). + ListVolumesRequestOrderByNameAsc = ListVolumesRequestOrderBy("name_asc") + // Order by name (descending order). + ListVolumesRequestOrderByNameDesc = ListVolumesRequestOrderBy("name_desc") +) + +func (enum ListVolumesRequestOrderBy) String() string { + if enum == "" { + // return default value if empty + return "created_at_asc" + } + return string(enum) +} + +func (enum ListVolumesRequestOrderBy) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *ListVolumesRequestOrderBy) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = ListVolumesRequestOrderBy(ListVolumesRequestOrderBy(tmp).String()) + return nil +} + +type ReferenceStatus string + +const ( + // If unspecified, the status of the reference is unknown by default. + ReferenceStatusUnknownStatus = ReferenceStatus("unknown_status") + // When the reference is being attached (transient). + ReferenceStatusAttaching = ReferenceStatus("attaching") + // When the reference attached to a volume. + ReferenceStatusAttached = ReferenceStatus("attached") + // When the reference is being detached (transient). 
+ ReferenceStatusDetaching = ReferenceStatus("detaching") + // When the reference is detached from a volume - the reference ceases to exist. + ReferenceStatusDetached = ReferenceStatus("detached") + // Reference undergoing snapshotting operation (transient). + ReferenceStatusSnapshotting = ReferenceStatus("snapshotting") + // Error status. + ReferenceStatusError = ReferenceStatus("error") +) + +func (enum ReferenceStatus) String() string { + if enum == "" { + // return default value if empty + return "unknown_status" + } + return string(enum) +} + +func (enum ReferenceStatus) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *ReferenceStatus) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = ReferenceStatus(ReferenceStatus(tmp).String()) + return nil +} + +type ReferenceType string + +const ( + // If unspecified, the reference type is unknown by default. + ReferenceTypeUnknownType = ReferenceType("unknown_type") + // Reference linked to a snapshot (for snapshots only). + ReferenceTypeLink = ReferenceType("link") + // Exclusive reference that can be associated to a volume (for volumes only). + ReferenceTypeExclusive = ReferenceType("exclusive") + // Access to the volume or snapshot in a read-only mode, without storage write access to the resource. + ReferenceTypeReadOnly = ReferenceType("read_only") +) + +func (enum ReferenceType) String() string { + if enum == "" { + // return default value if empty + return "unknown_type" + } + return string(enum) +} + +func (enum ReferenceType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *ReferenceType) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = ReferenceType(ReferenceType(tmp).String()) + return nil +} + +type SnapshotStatus string + +const ( + // If unspecified, the snapshot status is unknown by default. + SnapshotStatusUnknownStatus = SnapshotStatus("unknown_status") + // The snapshot is under creation (transient). + SnapshotStatusCreating = SnapshotStatus("creating") + // Snapshot exists and is not attached to any reference. + SnapshotStatusAvailable = SnapshotStatus("available") + // Snapshot in an error status. + SnapshotStatusError = SnapshotStatus("error") + // Snapshot is being deleted (transient). + SnapshotStatusDeleting = SnapshotStatus("deleting") + // Snapshot was deleted. + SnapshotStatusDeleted = SnapshotStatus("deleted") + // Snapshot attached to one or more references. + SnapshotStatusInUse = SnapshotStatus("in_use") + SnapshotStatusLocked = SnapshotStatus("locked") +) + +func (enum SnapshotStatus) String() string { + if enum == "" { + // return default value if empty + return "unknown_status" + } + return string(enum) +} + +func (enum SnapshotStatus) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *SnapshotStatus) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = SnapshotStatus(SnapshotStatus(tmp).String()) + return nil +} + +type StorageClass string + +const ( + // If unspecified, the Storage Class is unknown by default. + StorageClassUnknownStorageClass = StorageClass("unknown_storage_class") + // No specific Storage Class selected. + StorageClassUnspecified = StorageClass("unspecified") + // Classic storage. 
+ StorageClassBssd = StorageClass("bssd") + // Performance storage with lower latency. + StorageClassSbs = StorageClass("sbs") +) + +func (enum StorageClass) String() string { + if enum == "" { + // return default value if empty + return "unknown_storage_class" + } + return string(enum) +} + +func (enum StorageClass) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *StorageClass) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = StorageClass(StorageClass(tmp).String()) + return nil +} + +type VolumeStatus string + +const ( + // If unspecified, the volume status is unknown by default. + VolumeStatusUnknownStatus = VolumeStatus("unknown_status") + // The volume is under creation (transient). + VolumeStatusCreating = VolumeStatus("creating") + // The volume exists and is not attached to any reference. + VolumeStatusAvailable = VolumeStatus("available") + // The volume exists and is already attached to a reference. + VolumeStatusInUse = VolumeStatus("in_use") + // The volume undergoing deletion (transient). + VolumeStatusDeleting = VolumeStatus("deleting") + VolumeStatusDeleted = VolumeStatus("deleted") + // The volume is being increased (transient). + VolumeStatusResizing = VolumeStatus("resizing") + // The volume is an error status. + VolumeStatusError = VolumeStatus("error") + // The volume is undergoing snapshotting operation (transient). + VolumeStatusSnapshotting = VolumeStatus("snapshotting") + VolumeStatusLocked = VolumeStatus("locked") +) + +func (enum VolumeStatus) String() string { + if enum == "" { + // return default value if empty + return "unknown_status" + } + return string(enum) +} + +func (enum VolumeStatus) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *VolumeStatus) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = VolumeStatus(VolumeStatus(tmp).String()) + return nil +} + +// SnapshotParentVolume: snapshot parent volume. +type SnapshotParentVolume struct { + // ID: parent volume UUID (volume from which the snapshot originates). + ID string `json:"id"` + + // Name: name of the parent volume. + Name string `json:"name"` + + // Type: volume type of the parent volume. + Type string `json:"type"` + + // Status: current status the parent volume. + // Default value: unknown_status + Status VolumeStatus `json:"status"` +} + +// VolumeSpecifications: volume specifications. +type VolumeSpecifications struct { + // PerfIops: the maximum IO/s expected, according to the different options available in stock (`5000 | 15000`). + PerfIops *uint32 `json:"perf_iops"` + + // Class: the storage class of the volume. + // Default value: unknown_storage_class + Class StorageClass `json:"class"` +} + +// Reference: reference. +type Reference struct { + // ID: UUID of the reference. + ID string `json:"id"` + + // ProductResourceType: type of resource to which the reference is associated. + ProductResourceType string `json:"product_resource_type"` + + // ProductResourceID: UUID of the product resource it refers to (according to the product_resource_type). + ProductResourceID string `json:"product_resource_id"` + + // CreatedAt: creation date of the reference. + CreatedAt *time.Time `json:"created_at"` + + // Type: type of reference (link, exclusive, read_only). 
+ // Default value: unknown_type + Type ReferenceType `json:"type"` + + // Status: status of reference (attaching, attached, detaching). + // Default value: unknown_status + Status ReferenceStatus `json:"status"` +} + +// CreateVolumeRequestFromEmpty: create volume request from empty. +type CreateVolumeRequestFromEmpty struct { + // Size: must be compliant with the minimum (1 GB) and maximum (10 TB) allowed size. + Size scw.Size `json:"size"` +} + +// CreateVolumeRequestFromSnapshot: create volume request from snapshot. +type CreateVolumeRequestFromSnapshot struct { + // Size: must be compliant with the minimum (1 GB) and maximum (10 TB) allowed size. + // Size is optional and is used only if a resize of the volume is requested, otherwise original snapshot size will be used. + Size *scw.Size `json:"size"` + + // SnapshotID: source snapshot from which volume will be created. + SnapshotID string `json:"snapshot_id"` +} + +// SnapshotSummary: snapshot summary. +type SnapshotSummary struct { + // ID: UUID of the snapshot. + ID string `json:"id"` + + // Name: name of the snapshot. + Name string `json:"name"` + + // ParentVolume: if the parent volume has been deleted, value is null. + ParentVolume *SnapshotParentVolume `json:"parent_volume"` + + // Size: size of the snapshot in bytes. + Size scw.Size `json:"size"` + + // ProjectID: UUID of the project the snapshot belongs to. + ProjectID string `json:"project_id"` + + // CreatedAt: creation date of the snapshot. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: last modification date of the properties of a snapshot. + UpdatedAt *time.Time `json:"updated_at"` + + // Status: current status of the snapshot (available, in_use, ...). + // Default value: unknown_status + Status SnapshotStatus `json:"status"` + + // Tags: list of tags assigned to the volume. + Tags []string `json:"tags"` + + // Zone: snapshot Availability Zone. + Zone scw.Zone `json:"zone"` + + // Class: storage class of the snapshot. + // Default value: unknown_storage_class + Class StorageClass `json:"class"` +} + +// VolumeType: volume type. +type VolumeType struct { + // Type: volume type. + Type string `json:"type"` + + // Pricing: price of the volume billed in GB/hour. + Pricing *scw.Money `json:"pricing"` + + // SnapshotPricing: price of the snapshot billed in GB/hour. + SnapshotPricing *scw.Money `json:"snapshot_pricing"` + + // Specs: volume specifications of the volume type. + Specs *VolumeSpecifications `json:"specs"` +} + +// Volume: volume. +type Volume struct { + // ID: UUID of the volume. + ID string `json:"id"` + + // Name: name of the volume. + Name string `json:"name"` + + // Type: volume type. + Type string `json:"type"` + + // Size: volume size in bytes. + Size scw.Size `json:"size"` + + // ProjectID: UUID of the project to which the volume belongs. + ProjectID string `json:"project_id"` + + // CreatedAt: creation date of the volume. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: last update of the properties of a volume. + UpdatedAt *time.Time `json:"updated_at"` + + // References: list of the references to the volume. + References []*Reference `json:"references"` + + // ParentSnapshotID: when a volume is created from a snapshot, is the UUID of the snapshot from which the volume has been created. + ParentSnapshotID *string `json:"parent_snapshot_id"` + + // Status: current status of the volume (available, in_use, ...). + // Default value: unknown_status + Status VolumeStatus `json:"status"` + + // Tags: list of tags assigned to the volume. 
+ Tags []string `json:"tags"` + + // Zone: volume zone. + Zone scw.Zone `json:"zone"` + + // Specs: specifications of the volume. + Specs *VolumeSpecifications `json:"specs"` + + // LastDetachedAt: last time the volume was detached. + LastDetachedAt *time.Time `json:"last_detached_at"` +} + +// CreateSnapshotRequest: create snapshot request. +type CreateSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: UUID of the volume to snapshot. + VolumeID string `json:"volume_id"` + + // Name: name of the snapshot. + Name string `json:"name"` + + // ProjectID: UUID of the project to which the volume and the snapshot belong. + ProjectID string `json:"project_id"` + + // Tags: list of tags assigned to the snapshot. + Tags []string `json:"tags"` +} + +// CreateVolumeRequest: create volume request. +type CreateVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: name of the volume. + Name string `json:"name"` + + // PerfIops: the maximum IO/s expected, according to the different options available in stock (`5000 | 15000`). + // Precisely one of PerfIops must be set. + PerfIops *uint32 `json:"perf_iops,omitempty"` + + // ProjectID: UUID of the project the volume belongs to. + ProjectID string `json:"project_id"` + + // FromEmpty: specify the size of the new volume if creating a new one from scratch. + // Precisely one of FromEmpty, FromSnapshot must be set. + FromEmpty *CreateVolumeRequestFromEmpty `json:"from_empty,omitempty"` + + // FromSnapshot: specify the snapshot ID of the original snapshot. + // Precisely one of FromEmpty, FromSnapshot must be set. + FromSnapshot *CreateVolumeRequestFromSnapshot `json:"from_snapshot,omitempty"` + + // Tags: list of tags assigned to the volume. + Tags []string `json:"tags"` +} + +// DeleteSnapshotRequest: delete snapshot request. +type DeleteSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SnapshotID: UUID of the snapshot. + SnapshotID string `json:"-"` +} + +// DeleteVolumeRequest: delete volume request. +type DeleteVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: UUID of the volume. + VolumeID string `json:"-"` +} + +// GetSnapshotRequest: get snapshot request. +type GetSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SnapshotID: UUID of the snapshot. + SnapshotID string `json:"-"` +} + +// GetVolumeRequest: get volume request. +type GetVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: UUID of the volume. + VolumeID string `json:"-"` +} + +// ImportSnapshotFromS3Request: import snapshot from s3 request. +type ImportSnapshotFromS3Request struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + Bucket string `json:"bucket"` + + Key string `json:"key"` + + Name string `json:"name"` + + ProjectID string `json:"project_id"` + + Tags []string `json:"tags"` + + Size *scw.Size `json:"size,omitempty"` +} + +// ListSnapshotsRequest: list snapshots request. +type ListSnapshotsRequest struct { + // Zone: zone to target. 
If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // OrderBy: criteria to use when ordering the list. + // Default value: created_at_asc + OrderBy ListSnapshotsRequestOrderBy `json:"-"` + + // ProjectID: filter by Project ID. + ProjectID *string `json:"-"` + + // OrganizationID: filter by Organization ID. + OrganizationID *string `json:"-"` + + // Page: page number. + Page *int32 `json:"-"` + + // PageSize: page size, defines how many entries are returned in one page, must be lower or equal to 100. + PageSize *uint32 `json:"-"` + + // VolumeID: filter snapshots by the ID of the original volume. + VolumeID *string `json:"-"` + + // Name: filter snapshots by their names. + Name *string `json:"-"` +} + +// ListSnapshotsResponse: list snapshots response. +type ListSnapshotsResponse struct { + // Snapshots: paginated returned list of snapshots. + Snapshots []*SnapshotSummary `json:"snapshots"` + + // TotalCount: total number of snpashots in the project. + TotalCount uint64 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListSnapshotsResponse) UnsafeGetTotalCount() uint64 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListSnapshotsResponse) UnsafeAppend(res interface{}) (uint64, error) { + results, ok := res.(*ListSnapshotsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Snapshots = append(r.Snapshots, results.Snapshots...) + r.TotalCount += uint64(len(results.Snapshots)) + return uint64(len(results.Snapshots)), nil +} + +// ListVolumeTypesRequest: list volume types request. +type ListVolumeTypesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Page: page number. + Page *int32 `json:"-"` + + // PageSize: page size, defines how many entries are returned in one page, must be lower or equal to 100. + PageSize *uint32 `json:"-"` +} + +// ListVolumeTypesResponse: list volume types response. +type ListVolumeTypesResponse struct { + // VolumeTypes: returns paginated list of volume-types. + VolumeTypes []*VolumeType `json:"volume_types"` + + // TotalCount: total number of volume-types currently available in stock. + TotalCount uint64 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListVolumeTypesResponse) UnsafeGetTotalCount() uint64 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListVolumeTypesResponse) UnsafeAppend(res interface{}) (uint64, error) { + results, ok := res.(*ListVolumeTypesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.VolumeTypes = append(r.VolumeTypes, results.VolumeTypes...) + r.TotalCount += uint64(len(results.VolumeTypes)) + return uint64(len(results.VolumeTypes)), nil +} + +// ListVolumesRequest: list volumes request. +type ListVolumesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // OrderBy: criteria to use when ordering the list. + // Default value: created_at_asc + OrderBy ListVolumesRequestOrderBy `json:"-"` + + // ProjectID: filter by Project ID. + ProjectID *string `json:"-"` + + // OrganizationID: filter by Organization ID. + OrganizationID *string `json:"-"` + + // Page: page number. 
+ Page *int32 `json:"-"` + + // PageSize: page size, defines how many entries are returned in one page, must be lower or equal to 100. + PageSize *uint32 `json:"-"` + + // Name: filter the return volumes by their names. + Name *string `json:"-"` + + // ProductResourceID: filter by a product resource ID linked to this volume (such as an Instance ID). + ProductResourceID *string `json:"-"` +} + +// ListVolumesResponse: list volumes response. +type ListVolumesResponse struct { + // Volumes: paginated returned list of volumes. + Volumes []*Volume `json:"volumes"` + + // TotalCount: total number of volumes in the project. + TotalCount uint64 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListVolumesResponse) UnsafeGetTotalCount() uint64 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListVolumesResponse) UnsafeAppend(res interface{}) (uint64, error) { + results, ok := res.(*ListVolumesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Volumes = append(r.Volumes, results.Volumes...) + r.TotalCount += uint64(len(results.Volumes)) + return uint64(len(results.Volumes)), nil +} + +// Snapshot: snapshot. +type Snapshot struct { + // ID: UUID of the snapshot. + ID string `json:"id"` + + // Name: name of the snapshot. + Name string `json:"name"` + + // ParentVolume: if the parent volume was deleted, value is null. + ParentVolume *SnapshotParentVolume `json:"parent_volume"` + + // Size: size in bytes of the snapshot. + Size scw.Size `json:"size"` + + // ProjectID: UUID of the project the snapshot belongs to. + ProjectID string `json:"project_id"` + + // CreatedAt: creation date of the snapshot. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: last modification date of the properties of a snapshot. + UpdatedAt *time.Time `json:"updated_at"` + + // References: list of the references to the snapshot. + References []*Reference `json:"references"` + + // Status: current status of the snapshot (available, in_use, ...). + // Default value: unknown_status + Status SnapshotStatus `json:"status"` + + // Tags: list of tags assigned to the volume. + Tags []string `json:"tags"` + + // Zone: snapshot zone. + Zone scw.Zone `json:"zone"` + + // Class: storage class of the snapshot. + // Default value: unknown_storage_class + Class StorageClass `json:"class"` +} + +// UpdateSnapshotRequest: update snapshot request. +type UpdateSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SnapshotID: UUID of the snapshot. + SnapshotID string `json:"-"` + + // Name: when defined, is the name of the snapshot. + Name *string `json:"name,omitempty"` + + // Tags: list of tags assigned to the snapshot. + Tags *[]string `json:"tags,omitempty"` +} + +// UpdateVolumeRequest: update volume request. +type UpdateVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: UUID of the volume. + VolumeID string `json:"-"` + + // Name: when defined, is the new name of the volume. + Name *string `json:"name,omitempty"` + + // Size: size in bytes of the volume, with a granularity of 1 GB (10^9 bytes). + // Must be compliant with the minimum (1GB) and maximum (10TB) allowed size. + Size *scw.Size `json:"size,omitempty"` + + // Tags: list of tags assigned to the volume. 
+ Tags *[]string `json:"tags,omitempty"` + + // PerfIops: the selected value must be available for the volume's current storage class. + PerfIops *uint32 `json:"perf_iops,omitempty"` +} + +// This API allows you to use and manage your Block Storage volumes. +type API struct { + client *scw.Client +} + +// NewAPI returns a API object from a Scaleway client. +func NewAPI(client *scw.Client) *API { + return &API{ + client: client, + } +} +func (s *API) Zones() []scw.Zone { + return []scw.Zone{scw.ZoneFrPar1, scw.ZoneFrPar2, scw.ZoneNlAms1, scw.ZoneNlAms3, scw.ZonePlWaw3} +} + +// ListVolumeTypes: List all available volume types in a specified zone. The volume types listed are ordered by name in ascending order. +func (s *API) ListVolumeTypes(req *ListVolumeTypesRequest, opts ...scw.RequestOption) (*ListVolumeTypesResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "page_size", req.PageSize) + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/volume-types", + Query: query, + } + + var resp ListVolumeTypesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListVolumes: List all existing volumes in a specified zone. By default, the volumes listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. +func (s *API) ListVolumes(req *ListVolumesRequest, opts ...scw.RequestOption) (*ListVolumesResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "project_id", req.ProjectID) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "name", req.Name) + parameter.AddToQuery(query, "product_resource_id", req.ProductResourceID) + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/volumes", + Query: query, + } + + var resp ListVolumesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// CreateVolume: To create a new volume from scratch, you must specify `from_empty` and the `size`. +// To create a volume from an existing snapshot, specify `from_snapshot` and the `snapshot_id` in the request payload instead, size is optional and can be specified if you need to extend the original size. The volume will take on the same volume class and underlying IOPS limitations as the original snapshot. 
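As a rough usage sketch of the volume creation call described above, assuming placeholder credentials, project ID, and volume name (only one of FromEmpty or FromSnapshot may be set):

package main

import (
	"log"

	block "github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// Placeholder credentials and project ID; substitute real values.
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"),
		scw.WithDefaultProjectID("11111111-1111-1111-1111-111111111111"),
	)
	if err != nil {
		log.Fatal(err)
	}
	api := block.NewAPI(client)

	perfIops := uint32(5000)
	// Create a 20 GB volume from scratch: FromEmpty carries the size, PerfIops
	// selects the IOPS class advertised by the volume types.
	volume, err := api.CreateVolume(&block.CreateVolumeRequest{
		Zone:      scw.ZoneFrPar1,
		Name:      "example-volume",
		PerfIops:  &perfIops,
		FromEmpty: &block.CreateVolumeRequestFromEmpty{Size: 20 * scw.GB},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(volume.ID, volume.Status)
}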
+func (s *API) CreateVolume(req *CreateVolumeRequest, opts ...scw.RequestOption) (*Volume, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if req.ProjectID == "" { + defaultProjectID, _ := s.client.GetDefaultProjectID() + req.ProjectID = defaultProjectID + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/volumes", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Volume + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetVolume: Retrieve technical information about a specific volume. Details such as size, type, and status are returned in the response. +func (s *API) GetVolume(req *GetVolumeRequest, opts ...scw.RequestOption) (*Volume, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.VolumeID) == "" { + return nil, errors.New("field VolumeID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", + } + + var resp Volume + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DeleteVolume: You must specify the `volume_id` of the volume you want to delete. The volume must not be in the `in_use` status. +func (s *API) DeleteVolume(req *DeleteVolumeRequest, opts ...scw.RequestOption) error { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.VolumeID) == "" { + return errors.New("field VolumeID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// UpdateVolume: Update the technical details of a volume, such as its name, tags, or its new size and `volume_type` (within the same Block Storage class). +// You can only resize a volume to a larger size. It is currently not possible to change your Block Storage Class. +func (s *API) UpdateVolume(req *UpdateVolumeRequest, opts ...scw.RequestOption) (*Volume, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.VolumeID) == "" { + return nil, errors.New("field VolumeID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Volume + + err = s.client.Do(scwReq, &resp, opts...) 
+ if err != nil { + return nil, err + } + return &resp, nil +} + +// ListSnapshots: List all available snapshots in a specified zone. By default, the snapshots listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. +func (s *API) ListSnapshots(req *ListSnapshotsRequest, opts ...scw.RequestOption) (*ListSnapshotsResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "project_id", req.ProjectID) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "volume_id", req.VolumeID) + parameter.AddToQuery(query, "name", req.Name) + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/snapshots", + Query: query, + } + + var resp ListSnapshotsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetSnapshot: Retrieve technical information about a specific snapshot. Details such as size, volume type, and status are returned in the response. +func (s *API) GetSnapshot(req *GetSnapshotRequest, opts ...scw.RequestOption) (*Snapshot, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.SnapshotID) == "" { + return nil, errors.New("field SnapshotID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", + } + + var resp Snapshot + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// CreateSnapshot: To create a snapshot, the volume must be in the `in_use` or the `available` status. +// If your volume is in a transient state, you need to wait until the end of the current operation. +func (s *API) CreateSnapshot(req *CreateSnapshotRequest, opts ...scw.RequestOption) (*Snapshot, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if req.ProjectID == "" { + defaultProjectID, _ := s.client.GetDefaultProjectID() + req.ProjectID = defaultProjectID + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/snapshots", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Snapshot + + err = s.client.Do(scwReq, &resp, opts...) 
+ if err != nil { + return nil, err + } + return &resp, nil +} + +// ImportSnapshotFromS3: +func (s *API) ImportSnapshotFromS3(req *ImportSnapshotFromS3Request, opts ...scw.RequestOption) (*Snapshot, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if req.ProjectID == "" { + defaultProjectID, _ := s.client.GetDefaultProjectID() + req.ProjectID = defaultProjectID + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/import-from-s3", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Snapshot + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DeleteSnapshot: You must specify the `snapshot_id` of the snapshot you want to delete. The snapshot must not be in use. +func (s *API) DeleteSnapshot(req *DeleteSnapshotRequest, opts ...scw.RequestOption) error { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.SnapshotID) == "" { + return errors.New("field SnapshotID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// UpdateSnapshot: Update the name or tags of the snapshot. +func (s *API) UpdateSnapshot(req *UpdateSnapshotRequest, opts ...scw.RequestOption) (*Snapshot, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.SnapshotID) == "" { + return nil, errors.New("field SnapshotID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/block/v1alpha1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Snapshot + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/snapshot_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/snapshot_utils.go new file mode 100644 index 0000000000..d16f6114fc --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/snapshot_utils.go @@ -0,0 +1,59 @@ +package block + +import ( + "time" + + "github.com/scaleway/scaleway-sdk-go/internal/async" + "github.com/scaleway/scaleway-sdk-go/internal/errors" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +// WaitForSnapshotRequest is used by WaitForSnapshot method. +type WaitForSnapshotRequest struct { + SnapshotID string + Zone scw.Zone + Timeout *time.Duration + RetryInterval *time.Duration +} + +// WaitForSnapshot wait for the snapshot to be in a "terminal state" before returning. 
+func (s *API) WaitForSnapshot(req *WaitForSnapshotRequest, opts ...scw.RequestOption) (*Snapshot, error) { + timeout := defaultTimeout + if req.Timeout != nil { + timeout = *req.Timeout + } + retryInterval := defaultRetryInterval + if req.RetryInterval != nil { + retryInterval = *req.RetryInterval + } + + terminalStatus := map[SnapshotStatus]struct{}{ + SnapshotStatusAvailable: {}, + SnapshotStatusInUse: {}, + SnapshotStatusError: {}, + SnapshotStatusLocked: {}, + SnapshotStatusDeleted: {}, + } + + snapshot, err := async.WaitSync(&async.WaitSyncConfig{ + Get: func() (interface{}, bool, error) { + res, err := s.GetSnapshot(&GetSnapshotRequest{ + SnapshotID: req.SnapshotID, + Zone: req.Zone, + }, opts...) + + if err != nil { + return nil, false, err + } + _, isTerminal := terminalStatus[res.Status] + + return res, isTerminal, err + }, + Timeout: timeout, + IntervalStrategy: async.LinearIntervalStrategy(retryInterval), + }) + if err != nil { + return nil, errors.Wrap(err, "waiting for snapshot failed") + } + return snapshot.(*Snapshot), nil +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/volume_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/volume_utils.go new file mode 100644 index 0000000000..229b4111d2 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1/volume_utils.go @@ -0,0 +1,149 @@ +package block + +import ( + "time" + + "github.com/scaleway/scaleway-sdk-go/internal/async" + "github.com/scaleway/scaleway-sdk-go/internal/errors" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +const ( + defaultTimeout = 5 * time.Minute + defaultRetryInterval = 5 * time.Second +) + +// WaitForVolumeRequest is used by WaitForVolume method. +type WaitForVolumeRequest struct { + VolumeID string + Zone scw.Zone + Timeout *time.Duration + RetryInterval *time.Duration + + // If set, will wait until this specific status has been reached or the + // volume has an error status. This is useful when we need to wait for + // the volume to transition from "in_use" to "available". + TerminalStatus *VolumeStatus +} + +// WaitForVolume waits for the volume to be in a "terminal state" before returning. +func (s *API) WaitForVolume(req *WaitForVolumeRequest, opts ...scw.RequestOption) (*Volume, error) { + timeout := defaultTimeout + if req.Timeout != nil { + timeout = *req.Timeout + } + retryInterval := defaultRetryInterval + if req.RetryInterval != nil { + retryInterval = *req.RetryInterval + } + + terminalStatus := map[VolumeStatus]struct{}{ + VolumeStatusError: {}, + VolumeStatusLocked: {}, + VolumeStatusDeleted: {}, + } + + if req.TerminalStatus != nil { + terminalStatus[*req.TerminalStatus] = struct{}{} + } else { + terminalStatus[VolumeStatusAvailable] = struct{}{} + terminalStatus[VolumeStatusInUse] = struct{}{} + } + + volume, err := async.WaitSync(&async.WaitSyncConfig{ + Get: func() (interface{}, bool, error) { + res, err := s.GetVolume(&GetVolumeRequest{ + VolumeID: req.VolumeID, + Zone: req.Zone, + }, opts...) + + if err != nil { + return nil, false, err + } + _, isTerminal := terminalStatus[res.Status] + + return res, isTerminal, err + }, + Timeout: timeout, + IntervalStrategy: async.LinearIntervalStrategy(retryInterval), + }) + if err != nil { + return nil, errors.Wrap(err, "waiting for volume failed") + } + return volume.(*Volume), nil +} + +// WaitForVolumeAndReferencesRequest is used by WaitForVolumeAndReferences method. 
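+//
+// By default the wait ends once the volume is available or in_use and every
+// reference is attached or detached (or an error, locked or deleted state is
+// reached). A hedged example of waiting for a volume to settle after an attach,
+// using the statuses defined in this package (the volume ID and zone are
+// placeholders):
+//
+//	inUse := VolumeStatusInUse
+//	attached := ReferenceStatusAttached
+//	volume, err := api.WaitForVolumeAndReferences(&WaitForVolumeAndReferencesRequest{
+//		VolumeID:                "22222222-2222-2222-2222-222222222222",
+//		Zone:                    scw.ZoneFrPar1,
+//		VolumeTerminalStatus:    &inUse,
+//		ReferenceTerminalStatus: &attached,
+//	})
+//	if err != nil {
+//		// the volume or one of its references did not reach the expected state in time
+//	}
+//	_ = volume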
+type WaitForVolumeAndReferencesRequest struct { + VolumeID string + Zone scw.Zone + Timeout *time.Duration + RetryInterval *time.Duration + + VolumeTerminalStatus *VolumeStatus + ReferenceTerminalStatus *ReferenceStatus +} + +// WaitForVolumeAndReferences waits for the volume and its references to be in a "terminal state" before returning. +func (s *API) WaitForVolumeAndReferences(req *WaitForVolumeAndReferencesRequest, opts ...scw.RequestOption) (*Volume, error) { + timeout := defaultTimeout + if req.Timeout != nil { + timeout = *req.Timeout + } + retryInterval := defaultRetryInterval + if req.RetryInterval != nil { + retryInterval = *req.RetryInterval + } + + terminalStatus := map[VolumeStatus]struct{}{ + VolumeStatusError: {}, + VolumeStatusLocked: {}, + VolumeStatusDeleted: {}, + } + if req.VolumeTerminalStatus != nil { + terminalStatus[*req.VolumeTerminalStatus] = struct{}{} + } else { + terminalStatus[VolumeStatusAvailable] = struct{}{} + terminalStatus[VolumeStatusInUse] = struct{}{} + } + + referenceTerminalStatus := map[ReferenceStatus]struct{}{ + ReferenceStatusError: {}, + } + if req.ReferenceTerminalStatus != nil { + referenceTerminalStatus[*req.ReferenceTerminalStatus] = struct{}{} + } else { + referenceTerminalStatus[ReferenceStatusAttached] = struct{}{} + referenceTerminalStatus[ReferenceStatusDetached] = struct{}{} + } + + volume, err := async.WaitSync(&async.WaitSyncConfig{ + Get: func() (interface{}, bool, error) { + volume, err := s.GetVolume(&GetVolumeRequest{ + VolumeID: req.VolumeID, + Zone: req.Zone, + }, opts...) + if err != nil { + return nil, false, err + } + + referencesAreTerminal := true + + for _, reference := range volume.References { + _, referenceIsTerminal := referenceTerminalStatus[reference.Status] + referencesAreTerminal = referencesAreTerminal && referenceIsTerminal + } + + _, isTerminal := terminalStatus[volume.Status] + + return volume, isTerminal && referencesAreTerminal, nil + }, + Timeout: timeout, + IntervalStrategy: async.LinearIntervalStrategy(retryInterval), + }) + if err != nil { + return nil, errors.Wrap(err, "waiting for Volume failed") + } + + return volume.(*Volume), nil +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1/domain_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1/domain_sdk.go index c4aa4999b4..8334923bd2 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1/domain_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1/domain_sdk.go @@ -19,6 +19,7 @@ import ( "github.com/scaleway/scaleway-sdk-go/internal/parameter" "github.com/scaleway/scaleway-sdk-go/namegenerator" "github.com/scaleway/scaleway-sdk-go/scw" + std "github.com/scaleway/scaleway-sdk-go/api/std" ) // always import dependencies @@ -39,32 +40,6 @@ var ( _ = namegenerator.GetRandomName ) -// API: domains and DNS API. -// Manage your domains, DNS zones and records with the Domains and DNS API. -type API struct { - client *scw.Client -} - -// NewAPI returns a API object from a Scaleway client. -func NewAPI(client *scw.Client) *API { - return &API{ - client: client, - } -} - -// RegistrarAPI: domains and DNS - Registrar API. -// Manage your domains and contacts. -type RegistrarAPI struct { - client *scw.Client -} - -// NewRegistrarAPI returns a RegistrarAPI object from a Scaleway client. 
-func NewRegistrarAPI(client *scw.Client) *RegistrarAPI { - return &RegistrarAPI{ - client: client, - } -} - type ContactEmailStatus string const ( @@ -254,7 +229,7 @@ func (enum *ContactLegalForm) UnmarshalJSON(data []byte) error { type DNSZoneStatus string const ( - // If unspecified, the DNS zone's status is unknown by default + // If unspecified, the DNS zone's status is unknown by default. DNSZoneStatusUnknown = DNSZoneStatus("unknown") // The DNS zone is active and healthy. DNSZoneStatusActive = DNSZoneStatus("active") @@ -549,35 +524,35 @@ func (enum *HostStatus) UnmarshalJSON(data []byte) error { return nil } -type LanguageCode string +type LinkedProduct string const ( - LanguageCodeUnknownLanguageCode = LanguageCode("unknown_language_code") - LanguageCodeEnUS = LanguageCode("en_US") - LanguageCodeFrFR = LanguageCode("fr_FR") - LanguageCodeDeDE = LanguageCode("de_DE") + // If unspecified, no Scaleway product uses the resources. + LinkedProductUnknownProduct = LinkedProduct("unknown_product") + // Resources are used by Scaleway VPC. + LinkedProductVpc = LinkedProduct("vpc") ) -func (enum LanguageCode) String() string { +func (enum LinkedProduct) String() string { if enum == "" { // return default value if empty - return "unknown_language_code" + return "unknown_product" } return string(enum) } -func (enum LanguageCode) MarshalJSON() ([]byte, error) { +func (enum LinkedProduct) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf(`"%s"`, enum)), nil } -func (enum *LanguageCode) UnmarshalJSON(data []byte) error { +func (enum *LinkedProduct) UnmarshalJSON(data []byte) error { tmp := "" if err := json.Unmarshal(data, &tmp); err != nil { return err } - *enum = LanguageCode(LanguageCode(tmp).String()) + *enum = LinkedProduct(LinkedProduct(tmp).String()) return nil } @@ -695,9 +670,9 @@ func (enum *ListDNSZonesRequestOrderBy) UnmarshalJSON(data []byte) error { type ListDomainsRequestOrderBy string const ( - // Order by domain name (ascending) + // Order by domain name (ascending). ListDomainsRequestOrderByDomainAsc = ListDomainsRequestOrderBy("domain_asc") - // Order by domain name (descending) + // Order by domain name (descending). ListDomainsRequestOrderByDomainDesc = ListDomainsRequestOrderBy("domain_desc") ) @@ -727,9 +702,9 @@ func (enum *ListDomainsRequestOrderBy) UnmarshalJSON(data []byte) error { type ListRenewableDomainsRequestOrderBy string const ( - // Order by domain name (ascending) + // Order by domain name (ascending). ListRenewableDomainsRequestOrderByDomainAsc = ListRenewableDomainsRequestOrderBy("domain_asc") - // Order by domain name (descending) + // Order by domain name (descending). ListRenewableDomainsRequestOrderByDomainDesc = ListRenewableDomainsRequestOrderBy("domain_desc") ) @@ -759,21 +734,21 @@ func (enum *ListRenewableDomainsRequestOrderBy) UnmarshalJSON(data []byte) error type ListTasksRequestOrderBy string const ( - // Order by domain name (descending) + // Order by domain name (descending). ListTasksRequestOrderByDomainDesc = ListTasksRequestOrderBy("domain_desc") - // Order by domain name (ascending) + // Order by domain name (ascending). ListTasksRequestOrderByDomainAsc = ListTasksRequestOrderBy("domain_asc") - // Order by type (ascending) + // Order by type (ascending). ListTasksRequestOrderByTypeAsc = ListTasksRequestOrderBy("type_asc") - // Order by type (descending) + // Order by type (descending). ListTasksRequestOrderByTypeDesc = ListTasksRequestOrderBy("type_desc") - // Order by status (ascending) + // Order by status (ascending). 
ListTasksRequestOrderByStatusAsc = ListTasksRequestOrderBy("status_asc") - // Order by status (descending) + // Order by status (descending). ListTasksRequestOrderByStatusDesc = ListTasksRequestOrderBy("status_desc") - // Order by updated date (ascending) + // Order by updated date (ascending). ListTasksRequestOrderByUpdatedAtAsc = ListTasksRequestOrderBy("updated_at_asc") - // Order by updated date (descending) + // Order by updated date (descending). ListTasksRequestOrderByUpdatedAtDesc = ListTasksRequestOrderBy("updated_at_desc") ) @@ -800,12 +775,44 @@ func (enum *ListTasksRequestOrderBy) UnmarshalJSON(data []byte) error { return nil } +type ListTldsRequestOrderBy string + +const ( + // Order by TLD name (ascending). + ListTldsRequestOrderByNameAsc = ListTldsRequestOrderBy("name_asc") + // Order by TLD name (descending). + ListTldsRequestOrderByNameDesc = ListTldsRequestOrderBy("name_desc") +) + +func (enum ListTldsRequestOrderBy) String() string { + if enum == "" { + // return default value if empty + return "name_asc" + } + return string(enum) +} + +func (enum ListTldsRequestOrderBy) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *ListTldsRequestOrderBy) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = ListTldsRequestOrderBy(ListTldsRequestOrderBy(tmp).String()) + return nil +} + type RawFormat string const ( // If unspecified, the format is unknown by default. RawFormatUnknownRawFormat = RawFormat("unknown_raw_format") - // Export the DNS zone in text bind format + // Export the DNS zone in text bind format. RawFormatBind = RawFormat("bind") ) @@ -887,7 +894,7 @@ const ( RecordTypeMX = RecordType("MX") // Specifies nameservers for a domain. Example: 'ns1.yourcompany.com'. RecordTypeNS = RecordType("NS") - // A reverse pointer is used to specify the hostname that belongs to an IP or an IPv6 address. Example: 'www.yourcompany.com.'. + // A reverse pointer is used to specify the hostname that belongs to an IP or an IPv6 address. Example: 'www.yourcompany.com.'. RecordTypePTR = RecordType("PTR") // A 'Certification Authority Authorization' record is used to specify certificate authorities that may issue certificates for a domain. Example: '0 issue ca.yourcompany.com'. RecordTypeCAA = RecordType("CAA") @@ -1095,8 +1102,10 @@ const ( TaskTypeUpdateHost = TaskType("update_host") // Delete domain's hostname. TaskTypeDeleteHost = TaskType("delete_host") - // Move a domain to another project. + // Move a domain to another Project. TaskTypeMoveProject = TaskType("move_project") + // Transfer a domain from Online to Scaleway Domains and DNS. + TaskTypeTransferOnlineDomain = TaskType("transfer_online_domain") ) func (enum TaskType) String() string { @@ -1122,38 +1131,252 @@ func (enum *TaskType) UnmarshalJSON(data []byte) error { return nil } -type AvailableDomain struct { - Domain string `json:"domain"` +// RecordGeoIPConfigMatch: record geo ip config match. +type RecordGeoIPConfigMatch struct { + Countries []string `json:"countries"` - Available bool `json:"available"` + Continents []string `json:"continents"` - Tld *Tld `json:"tld"` + Data string `json:"data"` } -// CheckContactsCompatibilityResponse: check contacts compatibility response. -type CheckContactsCompatibilityResponse struct { - Compatible bool `json:"compatible"` +// RecordViewConfigView: record view config view. 
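+// A view pairs a client subnet with the record data to return for queries
+// originating from that subnet (a reading inferred from the Subnet and Data
+// fields below).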
+type RecordViewConfigView struct { + Subnet string `json:"subnet"` - OwnerCheckResult *CheckContactsCompatibilityResponseContactCheckResult `json:"owner_check_result"` - - AdministrativeCheckResult *CheckContactsCompatibilityResponseContactCheckResult `json:"administrative_check_result"` - - TechnicalCheckResult *CheckContactsCompatibilityResponseContactCheckResult `json:"technical_check_result"` + Data string `json:"data"` } -type CheckContactsCompatibilityResponseContactCheckResult struct { - Compatible bool `json:"compatible"` +// RecordWeightedConfigWeightedIP: record weighted config weighted ip. +type RecordWeightedConfigWeightedIP struct { + IP net.IP `json:"ip"` - ErrorMessage *string `json:"error_message"` + Weight uint32 `json:"weight"` } -// ClearDNSZoneRecordsResponse: clear dns zone records response. -type ClearDNSZoneRecordsResponse struct { +// DSRecordPublicKey: ds record public key. +type DSRecordPublicKey struct { + Key string `json:"key"` +} + +// RecordGeoIPConfig: record geo ip config. +type RecordGeoIPConfig struct { + Matches []*RecordGeoIPConfigMatch `json:"matches"` + + Default string `json:"default"` +} + +// RecordHTTPServiceConfig: record http service config. +type RecordHTTPServiceConfig struct { + IPs []net.IP `json:"ips"` + + MustContain *string `json:"must_contain"` + + URL string `json:"url"` + + UserAgent *string `json:"user_agent"` + + // Strategy: default value: random + Strategy RecordHTTPServiceConfigStrategy `json:"strategy"` +} + +// RecordViewConfig: record view config. +type RecordViewConfig struct { + Views []*RecordViewConfigView `json:"views"` +} + +// RecordWeightedConfig: record weighted config. +type RecordWeightedConfig struct { + WeightedIPs []*RecordWeightedConfigWeightedIP `json:"weighted_ips"` +} + +// ContactExtensionFRAssociationInfo: contact extension fr association info. +type ContactExtensionFRAssociationInfo struct { + PublicationJo *time.Time `json:"publication_jo"` + + PublicationJoPage uint32 `json:"publication_jo_page"` +} + +// ContactExtensionFRCodeAuthAfnicInfo: contact extension fr code auth afnic info. +type ContactExtensionFRCodeAuthAfnicInfo struct { + CodeAuthAfnic string `json:"code_auth_afnic"` +} + +// ContactExtensionFRDunsInfo: contact extension fr duns info. +type ContactExtensionFRDunsInfo struct { + DunsID string `json:"duns_id"` + + LocalID string `json:"local_id"` +} + +// ContactExtensionFRIndividualInfo: contact extension fr individual info. +type ContactExtensionFRIndividualInfo struct { + WhoisOptIn bool `json:"whois_opt_in"` +} + +// ContactExtensionFRTrademarkInfo: contact extension fr trademark info. +type ContactExtensionFRTrademarkInfo struct { + TrademarkInpi string `json:"trademark_inpi"` +} + +// DSRecordDigest: ds record digest. +type DSRecordDigest struct { + // Type: default value: sha_1 + Type DSRecordDigestType `json:"type"` + + Digest string `json:"digest"` + + PublicKey *DSRecordPublicKey `json:"public_key"` +} + +// Record: record. +type Record struct { + Data string `json:"data"` + + Name string `json:"name"` + + Priority uint32 `json:"priority"` + + TTL uint32 `json:"ttl"` + + // Type: default value: unknown + Type RecordType `json:"type"` + + Comment *string `json:"comment"` + + // Precisely one of GeoIPConfig, HTTPServiceConfig, WeightedConfig, ViewConfig must be set. + GeoIPConfig *RecordGeoIPConfig `json:"geo_ip_config,omitempty"` + + // Precisely one of GeoIPConfig, HTTPServiceConfig, WeightedConfig, ViewConfig must be set. 
+ HTTPServiceConfig *RecordHTTPServiceConfig `json:"http_service_config,omitempty"` + + // Precisely one of GeoIPConfig, HTTPServiceConfig, WeightedConfig, ViewConfig must be set. + WeightedConfig *RecordWeightedConfig `json:"weighted_config,omitempty"` + + // Precisely one of GeoIPConfig, HTTPServiceConfig, WeightedConfig, ViewConfig must be set. + ViewConfig *RecordViewConfig `json:"view_config,omitempty"` + + ID string `json:"id"` +} + +// RecordIdentifier: record identifier. +type RecordIdentifier struct { + Name string `json:"name"` + + // Type: default value: unknown + Type RecordType `json:"type"` + + Data *string `json:"data"` + + TTL *uint32 `json:"ttl"` +} + +// ContactExtensionEU: contact extension eu. +type ContactExtensionEU struct { + EuropeanCitizenship string `json:"european_citizenship"` +} + +// ContactExtensionFR: contact extension fr. +type ContactExtensionFR struct { + // Mode: default value: mode_unknown + Mode ContactExtensionFRMode `json:"mode"` + + // Precisely one of IndividualInfo, DunsInfo, AssociationInfo, TrademarkInfo, CodeAuthAfnicInfo must be set. + IndividualInfo *ContactExtensionFRIndividualInfo `json:"individual_info,omitempty"` + + // Precisely one of IndividualInfo, DunsInfo, AssociationInfo, TrademarkInfo, CodeAuthAfnicInfo must be set. + DunsInfo *ContactExtensionFRDunsInfo `json:"duns_info,omitempty"` + + // Precisely one of IndividualInfo, DunsInfo, AssociationInfo, TrademarkInfo, CodeAuthAfnicInfo must be set. + AssociationInfo *ContactExtensionFRAssociationInfo `json:"association_info,omitempty"` + + // Precisely one of IndividualInfo, DunsInfo, AssociationInfo, TrademarkInfo, CodeAuthAfnicInfo must be set. + TrademarkInfo *ContactExtensionFRTrademarkInfo `json:"trademark_info,omitempty"` + + // Precisely one of IndividualInfo, DunsInfo, AssociationInfo, TrademarkInfo, CodeAuthAfnicInfo must be set. + CodeAuthAfnicInfo *ContactExtensionFRCodeAuthAfnicInfo `json:"code_auth_afnic_info,omitempty"` +} + +// ContactExtensionNL: contact extension nl. +type ContactExtensionNL struct { + // LegalForm: default value: legal_form_unknown + LegalForm ContactExtensionNLLegalForm `json:"legal_form"` + + LegalFormRegistrationNumber string `json:"legal_form_registration_number"` +} + +// ContactQuestion: contact question. +type ContactQuestion struct { + Question string `json:"question"` + + Answer string `json:"answer"` +} + +// TldOffer: tld offer. +type TldOffer struct { + Action string `json:"action"` + + OperationPath string `json:"operation_path"` + + Price *scw.Money `json:"price"` +} + +// DSRecord: ds record. +type DSRecord struct { + KeyID uint32 `json:"key_id"` + + // Algorithm: default value: rsamd5 + Algorithm DSRecordAlgorithm `json:"algorithm"` + + // Precisely one of Digest, PublicKey must be set. + Digest *DSRecordDigest `json:"digest,omitempty"` + + // Precisely one of Digest, PublicKey must be set. + PublicKey *DSRecordPublicKey `json:"public_key,omitempty"` +} + +// RecordChangeAdd: record change add. +type RecordChangeAdd struct { + Records []*Record `json:"records"` +} + +// RecordChangeClear: record change clear. +type RecordChangeClear struct { +} + +// RecordChangeDelete: record change delete. +type RecordChangeDelete struct { + // Precisely one of ID, IDFields must be set. + ID *string `json:"id,omitempty"` + + // Precisely one of ID, IDFields must be set. + IDFields *RecordIdentifier `json:"id_fields,omitempty"` +} + +// RecordChangeSet: record change set. +type RecordChangeSet struct { + // Precisely one of ID, IDFields must be set. 
+ ID *string `json:"id,omitempty"` + + // Precisely one of ID, IDFields must be set. + IDFields *RecordIdentifier `json:"id_fields,omitempty"` + + Records []*Record `json:"records"` +} + +// ImportRawDNSZoneRequestTsigKey: import raw dns zone request tsig key. +type ImportRawDNSZoneRequestTsigKey struct { + Name string `json:"name"` + + Key string `json:"key"` + + Algorithm string `json:"algorithm"` } // Contact: contact. type Contact struct { ID string `json:"id"` + // LegalForm: default value: legal_form_unknown LegalForm ContactLegalForm `json:"legal_form"` @@ -1181,13 +1404,15 @@ type Contact struct { Country string `json:"country"` - VatIdentificationCode string `json:"vat_identification_code"` + VatIDentificationCode string `json:"vat_identification_code"` + + CompanyIDentificationCode string `json:"company_identification_code"` - CompanyIdentificationCode string `json:"company_identification_code"` // Lang: default value: unknown_language_code - Lang LanguageCode `json:"lang"` + Lang std.LanguageCode `json:"lang"` Resale bool `json:"resale"` + // Deprecated Questions *[]*ContactQuestion `json:"questions,omitempty"` @@ -1196,6 +1421,7 @@ type Contact struct { ExtensionEu *ContactExtensionEU `json:"extension_eu"` WhoisOptIn bool `json:"whois_opt_in"` + // EmailStatus: default value: email_status_unknown EmailStatus ContactEmailStatus `json:"email_status"` @@ -1204,73 +1430,7 @@ type Contact struct { ExtensionNl *ContactExtensionNL `json:"extension_nl"` } -type ContactExtensionEU struct { - EuropeanCitizenship string `json:"european_citizenship"` -} - -type ContactExtensionFR struct { - // Mode: default value: mode_unknown - Mode ContactExtensionFRMode `json:"mode"` - - // Precisely one of AssociationInfo, CodeAuthAfnicInfo, DunsInfo, IndividualInfo, TrademarkInfo must be set. - IndividualInfo *ContactExtensionFRIndividualInfo `json:"individual_info,omitempty"` - - // Precisely one of AssociationInfo, CodeAuthAfnicInfo, DunsInfo, IndividualInfo, TrademarkInfo must be set. - DunsInfo *ContactExtensionFRDunsInfo `json:"duns_info,omitempty"` - - // Precisely one of AssociationInfo, CodeAuthAfnicInfo, DunsInfo, IndividualInfo, TrademarkInfo must be set. - AssociationInfo *ContactExtensionFRAssociationInfo `json:"association_info,omitempty"` - - // Precisely one of AssociationInfo, CodeAuthAfnicInfo, DunsInfo, IndividualInfo, TrademarkInfo must be set. - TrademarkInfo *ContactExtensionFRTrademarkInfo `json:"trademark_info,omitempty"` - - // Precisely one of AssociationInfo, CodeAuthAfnicInfo, DunsInfo, IndividualInfo, TrademarkInfo must be set. 
- CodeAuthAfnicInfo *ContactExtensionFRCodeAuthAfnicInfo `json:"code_auth_afnic_info,omitempty"` -} - -type ContactExtensionFRAssociationInfo struct { - PublicationJo *time.Time `json:"publication_jo"` - - PublicationJoPage uint32 `json:"publication_jo_page"` -} - -type ContactExtensionFRCodeAuthAfnicInfo struct { - CodeAuthAfnic string `json:"code_auth_afnic"` -} - -type ContactExtensionFRDunsInfo struct { - DunsID string `json:"duns_id"` - - LocalID string `json:"local_id"` -} - -type ContactExtensionFRIndividualInfo struct { - WhoisOptIn bool `json:"whois_opt_in"` -} - -type ContactExtensionFRTrademarkInfo struct { - TrademarkInpi string `json:"trademark_inpi"` -} - -type ContactExtensionNL struct { - // LegalForm: default value: legal_form_unknown - LegalForm ContactExtensionNLLegalForm `json:"legal_form"` - - LegalFormRegistrationNumber string `json:"legal_form_registration_number"` -} - -type ContactQuestion struct { - Question string `json:"question"` - - Answer string `json:"answer"` -} - -type ContactRoles struct { - Contact *Contact `json:"contact"` - - Roles map[string]*ContactRolesRoles `json:"roles"` -} - +// ContactRolesRoles: contact roles roles. type ContactRolesRoles struct { IsOwner bool `json:"is_owner"` @@ -1279,121 +1439,12 @@ type ContactRolesRoles struct { IsTechnical bool `json:"is_technical"` } -type DNSZone struct { - Domain string `json:"domain"` - - Subdomain string `json:"subdomain"` - - Ns []string `json:"ns"` - - NsDefault []string `json:"ns_default"` - - NsMaster []string `json:"ns_master"` - // Status: default value: unknown - Status DNSZoneStatus `json:"status"` - - Message *string `json:"message"` - - UpdatedAt *time.Time `json:"updated_at"` - - ProjectID string `json:"project_id"` -} - -type DNSZoneVersion struct { - ID string `json:"id"` - - CreatedAt *time.Time `json:"created_at"` -} - -type DSRecord struct { - KeyID uint32 `json:"key_id"` - // Algorithm: default value: rsamd5 - Algorithm DSRecordAlgorithm `json:"algorithm"` - - // Precisely one of Digest, PublicKey must be set. - Digest *DSRecordDigest `json:"digest,omitempty"` - - // Precisely one of Digest, PublicKey must be set. - PublicKey *DSRecordPublicKey `json:"public_key,omitempty"` -} - -type DSRecordDigest struct { - // Type: default value: sha_1 - Type DSRecordDigestType `json:"type"` - - Digest string `json:"digest"` - - PublicKey *DSRecordPublicKey `json:"public_key"` -} - -type DSRecordPublicKey struct { - Key string `json:"key"` -} - -// DeleteDNSZoneResponse: delete dns zone response. -type DeleteDNSZoneResponse struct { -} - -// DeleteExternalDomainResponse: delete external domain response. -type DeleteExternalDomainResponse struct { -} - -// DeleteSSLCertificateResponse: delete ssl certificate response. -type DeleteSSLCertificateResponse struct { -} - -// Domain: domain. 
-type Domain struct { - Domain string `json:"domain"` - - OrganizationID string `json:"organization_id"` - - ProjectID string `json:"project_id"` - // AutoRenewStatus: default value: feature_status_unknown - AutoRenewStatus DomainFeatureStatus `json:"auto_renew_status"` - - Dnssec *DomainDNSSEC `json:"dnssec"` - - EppCode []string `json:"epp_code"` - - ExpiredAt *time.Time `json:"expired_at"` - - UpdatedAt *time.Time `json:"updated_at"` - - Registrar string `json:"registrar"` - - IsExternal bool `json:"is_external"` - // Status: default value: status_unknown - Status DomainStatus `json:"status"` - - DNSZones []*DNSZone `json:"dns_zones"` - - OwnerContact *Contact `json:"owner_contact"` - - TechnicalContact *Contact `json:"technical_contact"` - - AdministrativeContact *Contact `json:"administrative_contact"` - - // Precisely one of ExternalDomainRegistrationStatus, TransferRegistrationStatus must be set. - ExternalDomainRegistrationStatus *DomainRegistrationStatusExternalDomain `json:"external_domain_registration_status,omitempty"` - - // Precisely one of ExternalDomainRegistrationStatus, TransferRegistrationStatus must be set. - TransferRegistrationStatus *DomainRegistrationStatusTransfer `json:"transfer_registration_status,omitempty"` - - Tld *Tld `json:"tld"` -} - -type DomainDNSSEC struct { - // Status: default value: feature_status_unknown - Status DomainFeatureStatus `json:"status"` - - DsRecords []*DSRecord `json:"ds_records"` -} - +// DomainRegistrationStatusExternalDomain: domain registration status external domain. type DomainRegistrationStatusExternalDomain struct { ValidationToken string `json:"validation_token"` } +// DomainRegistrationStatusTransfer: domain registration status transfer. type DomainRegistrationStatusTransfer struct { // Status: default value: status_unknown Status DomainRegistrationStatusTransferStatus `json:"status"` @@ -1403,183 +1454,24 @@ type DomainRegistrationStatusTransfer struct { VoteNewOwner bool `json:"vote_new_owner"` } -type DomainSummary struct { - Domain string `json:"domain"` - - ProjectID string `json:"project_id"` - // AutoRenewStatus: default value: feature_status_unknown - AutoRenewStatus DomainFeatureStatus `json:"auto_renew_status"` - // DnssecStatus: default value: feature_status_unknown - DnssecStatus DomainFeatureStatus `json:"dnssec_status"` - - EppCode []string `json:"epp_code"` - - ExpiredAt *time.Time `json:"expired_at"` - - UpdatedAt *time.Time `json:"updated_at"` - - Registrar string `json:"registrar"` - - IsExternal bool `json:"is_external"` - // Status: default value: status_unknown - Status DomainStatus `json:"status"` - - // Precisely one of ExternalDomainRegistrationStatus, TransferRegistrationStatus must be set. - ExternalDomainRegistrationStatus *DomainRegistrationStatusExternalDomain `json:"external_domain_registration_status,omitempty"` - - // Precisely one of ExternalDomainRegistrationStatus, TransferRegistrationStatus must be set. - TransferRegistrationStatus *DomainRegistrationStatusTransfer `json:"transfer_registration_status,omitempty"` - - OrganizationID string `json:"organization_id"` -} - -// GetDNSZoneTsigKeyResponse: get dns zone tsig key response. -type GetDNSZoneTsigKeyResponse struct { +// Tld: tld. +type Tld struct { Name string `json:"name"` - Key string `json:"key"` + DnssecSupport bool `json:"dnssec_support"` - Algorithm string `json:"algorithm"` -} - -// GetDNSZoneVersionDiffResponse: get dns zone version diff response. 
-type GetDNSZoneVersionDiffResponse struct { - Changes []*RecordChange `json:"changes"` -} - -// GetDomainAuthCodeResponse: get domain auth code response. -type GetDomainAuthCodeResponse struct { - AuthCode string `json:"auth_code"` -} - -type Host struct { - Domain string `json:"domain"` - - Name string `json:"name"` - - IPs []net.IP `json:"ips"` - // Status: default value: unknown_status - Status HostStatus `json:"status"` -} - -type ImportProviderDNSZoneRequestOnlineV1 struct { - Token string `json:"token"` -} - -// ImportProviderDNSZoneResponse: import provider dns zone response. -type ImportProviderDNSZoneResponse struct { - Records []*Record `json:"records"` -} - -type ImportRawDNSZoneRequestAXFRSource struct { - NameServer string `json:"name_server"` - - TsigKey *ImportRawDNSZoneRequestTsigKey `json:"tsig_key"` -} - -type ImportRawDNSZoneRequestBindSource struct { - Content string `json:"content"` -} - -type ImportRawDNSZoneRequestTsigKey struct { - Name string `json:"name"` - - Key string `json:"key"` - - Algorithm string `json:"algorithm"` -} - -// ImportRawDNSZoneResponse: import raw dns zone response. -type ImportRawDNSZoneResponse struct { - Records []*Record `json:"records"` -} - -// ListContactsResponse: list contacts response. -type ListContactsResponse struct { - TotalCount uint32 `json:"total_count"` - - Contacts []*ContactRoles `json:"contacts"` -} - -// ListDNSZoneNameserversResponse: list dns zone nameservers response. -type ListDNSZoneNameserversResponse struct { - // Ns: DNS zone name servers returned. - Ns []*Nameserver `json:"ns"` -} - -// ListDNSZoneRecordsResponse: list dns zone records response. -type ListDNSZoneRecordsResponse struct { - // TotalCount: total number of DNS zone records. - TotalCount uint32 `json:"total_count"` - // Records: paginated returned DNS zone records. - Records []*Record `json:"records"` -} - -// ListDNSZoneVersionRecordsResponse: list dns zone version records response. -type ListDNSZoneVersionRecordsResponse struct { - // TotalCount: total number of DNS zones versions records. - TotalCount uint32 `json:"total_count"` - - Records []*Record `json:"records"` -} - -// ListDNSZoneVersionsResponse: list dns zone versions response. -type ListDNSZoneVersionsResponse struct { - // TotalCount: total number of DNS zones versions. - TotalCount uint32 `json:"total_count"` - - Versions []*DNSZoneVersion `json:"versions"` -} - -// ListDNSZonesResponse: list dns zones response. -type ListDNSZonesResponse struct { - // TotalCount: total number of DNS zones matching the requested criteria. - TotalCount uint32 `json:"total_count"` - // DNSZones: paginated returned DNS zones. - DNSZones []*DNSZone `json:"dns_zones"` -} - -// ListDomainHostsResponse: list domain hosts response. -type ListDomainHostsResponse struct { - TotalCount uint32 `json:"total_count"` - - Hosts []*Host `json:"hosts"` -} - -// ListDomainsResponse: list domains response. -type ListDomainsResponse struct { - TotalCount uint32 `json:"total_count"` - - Domains []*DomainSummary `json:"domains"` -} - -// ListRenewableDomainsResponse: list renewable domains response. -type ListRenewableDomainsResponse struct { - TotalCount uint32 `json:"total_count"` - - Domains []*RenewableDomain `json:"domains"` -} - -// ListSSLCertificatesResponse: list ssl certificates response. -type ListSSLCertificatesResponse struct { - TotalCount uint32 `json:"total_count"` - - Certificates []*SSLCertificate `json:"certificates"` -} - -// ListTasksResponse: list tasks response. 
-type ListTasksResponse struct { - TotalCount uint32 `json:"total_count"` - - Tasks []*Task `json:"tasks"` -} - -type Nameserver struct { - Name string `json:"name"` - - IP []string `json:"ip"` + DurationInYearsMin uint32 `json:"duration_in_years_min"` + + DurationInYearsMax uint32 `json:"duration_in_years_max"` + + IDnSupport bool `json:"idn_support"` + + Offers map[string]*TldOffer `json:"offers"` + + Specifications map[string]string `json:"specifications"` } +// NewContact: new contact. type NewContact struct { // LegalForm: default value: legal_form_unknown LegalForm ContactLegalForm `json:"legal_form"` @@ -1608,13 +1500,15 @@ type NewContact struct { Country string `json:"country"` - VatIdentificationCode *string `json:"vat_identification_code"` + VatIDentificationCode *string `json:"vat_identification_code"` + + CompanyIDentificationCode *string `json:"company_identification_code"` - CompanyIdentificationCode *string `json:"company_identification_code"` // Lang: default value: unknown_language_code - Lang LanguageCode `json:"lang"` + Lang std.LanguageCode `json:"lang"` Resale bool `json:"resale"` + // Deprecated Questions *[]*ContactQuestion `json:"questions,omitempty"` @@ -1629,168 +1523,152 @@ type NewContact struct { ExtensionNl *ContactExtensionNL `json:"extension_nl"` } -type OrderResponse struct { - Domains []string `json:"domains"` +// CheckContactsCompatibilityResponseContactCheckResult: check contacts compatibility response contact check result. +type CheckContactsCompatibilityResponseContactCheckResult struct { + Compatible bool `json:"compatible"` - OrganizationID string `json:"organization_id"` + ErrorMessage *string `json:"error_message"` +} + +// DNSZone: dns zone. +type DNSZone struct { + Domain string `json:"domain"` + + Subdomain string `json:"subdomain"` + + Ns []string `json:"ns"` + + NsDefault []string `json:"ns_default"` + + NsMaster []string `json:"ns_master"` + + // Status: default value: unknown + Status DNSZoneStatus `json:"status"` + + Message *string `json:"message"` + + UpdatedAt *time.Time `json:"updated_at"` ProjectID string `json:"project_id"` - TaskID string `json:"task_id"` - - CreatedAt *time.Time `json:"created_at"` + LinkedProducts []LinkedProduct `json:"linked_products"` } -type Record struct { - Data string `json:"data"` +// DomainDNSSEC: domain dnssec. +type DomainDNSSEC struct { + // Status: default value: feature_status_unknown + Status DomainFeatureStatus `json:"status"` - Name string `json:"name"` - - Priority uint32 `json:"priority"` - - TTL uint32 `json:"ttl"` - // Type: default value: unknown - Type RecordType `json:"type"` - - Comment *string `json:"comment"` - - // Precisely one of GeoIPConfig, HTTPServiceConfig, ViewConfig, WeightedConfig must be set. - GeoIPConfig *RecordGeoIPConfig `json:"geo_ip_config,omitempty"` - - // Precisely one of GeoIPConfig, HTTPServiceConfig, ViewConfig, WeightedConfig must be set. - HTTPServiceConfig *RecordHTTPServiceConfig `json:"http_service_config,omitempty"` - - // Precisely one of GeoIPConfig, HTTPServiceConfig, ViewConfig, WeightedConfig must be set. - WeightedConfig *RecordWeightedConfig `json:"weighted_config,omitempty"` - - // Precisely one of GeoIPConfig, HTTPServiceConfig, ViewConfig, WeightedConfig must be set. - ViewConfig *RecordViewConfig `json:"view_config,omitempty"` - - ID string `json:"id"` + DsRecords []*DSRecord `json:"ds_records"` } +// RecordChange: record change. type RecordChange struct { - - // Precisely one of Add, Clear, Delete, Set must be set. 
+ // Precisely one of Add, Set, Delete, Clear must be set. Add *RecordChangeAdd `json:"add,omitempty"` - // Precisely one of Add, Clear, Delete, Set must be set. + // Precisely one of Add, Set, Delete, Clear must be set. Set *RecordChangeSet `json:"set,omitempty"` - // Precisely one of Add, Clear, Delete, Set must be set. + // Precisely one of Add, Set, Delete, Clear must be set. Delete *RecordChangeDelete `json:"delete,omitempty"` - // Precisely one of Add, Clear, Delete, Set must be set. + // Precisely one of Add, Set, Delete, Clear must be set. Clear *RecordChangeClear `json:"clear,omitempty"` } -type RecordChangeAdd struct { - Records []*Record `json:"records"` +// ImportProviderDNSZoneRequestOnlineV1: import provider dns zone request online v1. +type ImportProviderDNSZoneRequestOnlineV1 struct { + Token string `json:"token"` } -type RecordChangeClear struct { +// ImportRawDNSZoneRequestAXFRSource: import raw dns zone request axfr source. +type ImportRawDNSZoneRequestAXFRSource struct { + NameServer string `json:"name_server"` + + TsigKey *ImportRawDNSZoneRequestTsigKey `json:"tsig_key"` } -type RecordChangeDelete struct { - - // Precisely one of ID, IDFields must be set. - ID *string `json:"id,omitempty"` - - // Precisely one of ID, IDFields must be set. - IDFields *RecordIdentifier `json:"id_fields,omitempty"` +// ImportRawDNSZoneRequestBindSource: import raw dns zone request bind source. +type ImportRawDNSZoneRequestBindSource struct { + Content string `json:"content"` } -type RecordChangeSet struct { +// ContactRoles: contact roles. +type ContactRoles struct { + Contact *Contact `json:"contact"` - // Precisely one of ID, IDFields must be set. - ID *string `json:"id,omitempty"` - - // Precisely one of ID, IDFields must be set. - IDFields *RecordIdentifier `json:"id_fields,omitempty"` - - Records []*Record `json:"records"` + Roles map[string]*ContactRolesRoles `json:"roles"` } -type RecordGeoIPConfig struct { - Matches []*RecordGeoIPConfigMatch `json:"matches"` - - Default string `json:"default"` -} - -type RecordGeoIPConfigMatch struct { - Countries []string `json:"countries"` - - Continents []string `json:"continents"` - - Data string `json:"data"` -} - -type RecordHTTPServiceConfig struct { - IPs []net.IP `json:"ips"` - - MustContain *string `json:"must_contain"` - - URL string `json:"url"` - - UserAgent *string `json:"user_agent"` - // Strategy: default value: random - Strategy RecordHTTPServiceConfigStrategy `json:"strategy"` -} - -type RecordIdentifier struct { +// Nameserver: nameserver. +type Nameserver struct { Name string `json:"name"` - // Type: default value: unknown - Type RecordType `json:"type"` - Data *string `json:"data"` - - TTL *uint32 `json:"ttl"` + IP []string `json:"ip"` } -type RecordViewConfig struct { - Views []*RecordViewConfigView `json:"views"` -} - -type RecordViewConfigView struct { - Subnet string `json:"subnet"` - - Data string `json:"data"` -} - -type RecordWeightedConfig struct { - WeightedIPs []*RecordWeightedConfigWeightedIP `json:"weighted_ips"` -} - -type RecordWeightedConfigWeightedIP struct { - IP net.IP `json:"ip"` - - Weight uint32 `json:"weight"` -} - -// RefreshDNSZoneResponse: refresh dns zone response. -type RefreshDNSZoneResponse struct { - // DNSZones: DNS zones returned. - DNSZones []*DNSZone `json:"dns_zones"` -} - -type RegisterExternalDomainResponse struct { - Domain string `json:"domain"` - - OrganizationID string `json:"organization_id"` - - ValidationToken string `json:"validation_token"` +// DNSZoneVersion: dns zone version. 
+type DNSZoneVersion struct { + ID string `json:"id"` CreatedAt *time.Time `json:"created_at"` - - ProjectID string `json:"project_id"` } +// Host: host. +type Host struct { + Domain string `json:"domain"` + + Name string `json:"name"` + + IPs []net.IP `json:"ips"` + + // Status: default value: unknown_status + Status HostStatus `json:"status"` +} + +// DomainSummary: domain summary. +type DomainSummary struct { + Domain string `json:"domain"` + + ProjectID string `json:"project_id"` + + // AutoRenewStatus: default value: feature_status_unknown + AutoRenewStatus DomainFeatureStatus `json:"auto_renew_status"` + + // DnssecStatus: default value: feature_status_unknown + DnssecStatus DomainFeatureStatus `json:"dnssec_status"` + + EppCode []string `json:"epp_code"` + + ExpiredAt *time.Time `json:"expired_at"` + + UpdatedAt *time.Time `json:"updated_at"` + + Registrar string `json:"registrar"` + + IsExternal bool `json:"is_external"` + + // Status: default value: status_unknown + Status DomainStatus `json:"status"` + + // Precisely one of ExternalDomainRegistrationStatus, TransferRegistrationStatus must be set. + ExternalDomainRegistrationStatus *DomainRegistrationStatusExternalDomain `json:"external_domain_registration_status,omitempty"` + + // Precisely one of ExternalDomainRegistrationStatus, TransferRegistrationStatus must be set. + TransferRegistrationStatus *DomainRegistrationStatusTransfer `json:"transfer_registration_status,omitempty"` + + OrganizationID string `json:"organization_id"` +} + +// RenewableDomain: renewable domain. type RenewableDomain struct { Domain string `json:"domain"` ProjectID string `json:"project_id"` OrganizationID string `json:"organization_id"` + // Status: default value: unknown Status RenewableDomainStatus `json:"status"` @@ -1807,14 +1685,12 @@ type RenewableDomain struct { Tld *Tld `json:"tld"` } -// RestoreDNSZoneVersionResponse: restore dns zone version response. -type RestoreDNSZoneVersionResponse struct { -} - +// SSLCertificate: ssl certificate. type SSLCertificate struct { DNSZone string `json:"dns_zone"` AlternativeDNSZones []string `json:"alternative_dns_zones"` + // Status: default value: unknown Status SSLCertificateStatus `json:"status"` @@ -1827,12 +1703,7 @@ type SSLCertificate struct { ExpiredAt *time.Time `json:"expired_at"` } -// SearchAvailableDomainsResponse: search available domains response. -type SearchAvailableDomainsResponse struct { - // AvailableDomains: array of available domains. - AvailableDomains []*AvailableDomain `json:"available_domains"` -} - +// Task: task. type Task struct { ID string `json:"id"` @@ -1841,8 +1712,10 @@ type Task struct { OrganizationID string `json:"organization_id"` Domain *string `json:"domain"` + // Type: default value: unknown Type TaskType `json:"type"` + // Status: default value: unavailable Status TaskStatus `json:"status"` @@ -1853,86 +1726,1144 @@ type Task struct { Message *string `json:"message"` } -type Tld struct { - Name string `json:"name"` - - DnssecSupport bool `json:"dnssec_support"` - - DurationInYearsMin uint32 `json:"duration_in_years_min"` - - DurationInYearsMax uint32 `json:"duration_in_years_max"` - - IdnSupport bool `json:"idn_support"` - - Offers map[string]*TldOffer `json:"offers"` - - Specifications map[string]string `json:"specifications"` -} - -type TldOffer struct { - Action string `json:"action"` - - OperationPath string `json:"operation_path"` - - Price *scw.Money `json:"price"` -} - +// TransferInDomainRequestTransferRequest: transfer in domain request transfer request. 
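+// Each entry identifies one domain to transfer in, together with its transfer
+// authorization code (see the Domain and AuthCode fields below).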
type TransferInDomainRequestTransferRequest struct { Domain string `json:"domain"` AuthCode string `json:"auth_code"` } +// UpdateContactRequestQuestion: update contact request question. type UpdateContactRequestQuestion struct { Question *string `json:"question"` Answer *string `json:"answer"` } +// AvailableDomain: available domain. +type AvailableDomain struct { + Domain string `json:"domain"` + + Available bool `json:"available"` + + Tld *Tld `json:"tld"` +} + +// CheckContactsCompatibilityResponse: check contacts compatibility response. +type CheckContactsCompatibilityResponse struct { + Compatible bool `json:"compatible"` + + OwnerCheckResult *CheckContactsCompatibilityResponseContactCheckResult `json:"owner_check_result"` + + AdministrativeCheckResult *CheckContactsCompatibilityResponseContactCheckResult `json:"administrative_check_result"` + + TechnicalCheckResult *CheckContactsCompatibilityResponseContactCheckResult `json:"technical_check_result"` +} + +// ClearDNSZoneRecordsRequest: clear dns zone records request. +type ClearDNSZoneRecordsRequest struct { + // DNSZone: DNS zone to clear. + DNSZone string `json:"-"` +} + +// ClearDNSZoneRecordsResponse: clear dns zone records response. +type ClearDNSZoneRecordsResponse struct { +} + +// CloneDNSZoneRequest: clone dns zone request. +type CloneDNSZoneRequest struct { + // DNSZone: DNS zone to clone. + DNSZone string `json:"-"` + + // DestDNSZone: destination DNS zone in which to clone the chosen DNS zone. + DestDNSZone string `json:"dest_dns_zone"` + + // Overwrite: specifies whether or not the destination DNS zone will be overwritten. + Overwrite bool `json:"overwrite"` + + // ProjectID: project ID of the destination DNS zone. + ProjectID *string `json:"project_id,omitempty"` +} + +// CreateDNSZoneRequest: create dns zone request. +type CreateDNSZoneRequest struct { + // Domain: domain in which to crreate the DNS zone. + Domain string `json:"domain"` + + // Subdomain: subdomain of the DNS zone to create. + Subdomain string `json:"subdomain"` + + // ProjectID: project ID in which to create the DNS zone. + ProjectID string `json:"project_id"` +} + +// CreateSSLCertificateRequest: create ssl certificate request. +type CreateSSLCertificateRequest struct { + DNSZone string `json:"dns_zone"` + + AlternativeDNSZones []string `json:"alternative_dns_zones"` +} + +// DeleteDNSZoneRequest: delete dns zone request. +type DeleteDNSZoneRequest struct { + // DNSZone: DNS zone to delete. + DNSZone string `json:"-"` + + // ProjectID: project ID of the DNS zone to delete. + ProjectID string `json:"-"` +} + +// DeleteDNSZoneResponse: delete dns zone response. +type DeleteDNSZoneResponse struct { +} + +// DeleteDNSZoneTsigKeyRequest: delete dns zone tsig key request. +type DeleteDNSZoneTsigKeyRequest struct { + DNSZone string `json:"-"` +} + +// DeleteExternalDomainResponse: delete external domain response. +type DeleteExternalDomainResponse struct { +} + +// DeleteSSLCertificateRequest: delete ssl certificate request. +type DeleteSSLCertificateRequest struct { + DNSZone string `json:"-"` +} + +// DeleteSSLCertificateResponse: delete ssl certificate response. +type DeleteSSLCertificateResponse struct { +} + +// Domain: domain. 
+type Domain struct { + Domain string `json:"domain"` + + OrganizationID string `json:"organization_id"` + + ProjectID string `json:"project_id"` + + // AutoRenewStatus: default value: feature_status_unknown + AutoRenewStatus DomainFeatureStatus `json:"auto_renew_status"` + + Dnssec *DomainDNSSEC `json:"dnssec"` + + EppCode []string `json:"epp_code"` + + ExpiredAt *time.Time `json:"expired_at"` + + UpdatedAt *time.Time `json:"updated_at"` + + Registrar string `json:"registrar"` + + IsExternal bool `json:"is_external"` + + // Status: default value: status_unknown + Status DomainStatus `json:"status"` + + DNSZones []*DNSZone `json:"dns_zones"` + + OwnerContact *Contact `json:"owner_contact"` + + TechnicalContact *Contact `json:"technical_contact"` + + AdministrativeContact *Contact `json:"administrative_contact"` + + // Precisely one of ExternalDomainRegistrationStatus, TransferRegistrationStatus must be set. + ExternalDomainRegistrationStatus *DomainRegistrationStatusExternalDomain `json:"external_domain_registration_status,omitempty"` + + // Precisely one of ExternalDomainRegistrationStatus, TransferRegistrationStatus must be set. + TransferRegistrationStatus *DomainRegistrationStatusTransfer `json:"transfer_registration_status,omitempty"` + + Tld *Tld `json:"tld"` + + LinkedProducts []LinkedProduct `json:"linked_products"` +} + +// ExportRawDNSZoneRequest: export raw dns zone request. +type ExportRawDNSZoneRequest struct { + // DNSZone: DNS zone to export. + DNSZone string `json:"-"` + + // Format: DNS zone format. + // Default value: unknown_raw_format + Format RawFormat `json:"-"` +} + +// GetDNSZoneTsigKeyRequest: get dns zone tsig key request. +type GetDNSZoneTsigKeyRequest struct { + DNSZone string `json:"-"` +} + +// GetDNSZoneTsigKeyResponse: get dns zone tsig key response. +type GetDNSZoneTsigKeyResponse struct { + Name string `json:"name"` + + Key string `json:"key"` + + Algorithm string `json:"algorithm"` +} + +// GetDNSZoneVersionDiffRequest: get dns zone version diff request. +type GetDNSZoneVersionDiffRequest struct { + DNSZoneVersionID string `json:"-"` +} + +// GetDNSZoneVersionDiffResponse: get dns zone version diff response. +type GetDNSZoneVersionDiffResponse struct { + Changes []*RecordChange `json:"changes"` +} + +// GetDomainAuthCodeResponse: get domain auth code response. +type GetDomainAuthCodeResponse struct { + AuthCode string `json:"auth_code"` +} + +// GetSSLCertificateRequest: get ssl certificate request. +type GetSSLCertificateRequest struct { + DNSZone string `json:"-"` +} + +// ImportProviderDNSZoneRequest: import provider dns zone request. +type ImportProviderDNSZoneRequest struct { + DNSZone string `json:"-"` + + // Precisely one of OnlineV1 must be set. + OnlineV1 *ImportProviderDNSZoneRequestOnlineV1 `json:"online_v1,omitempty"` +} + +// ImportProviderDNSZoneResponse: import provider dns zone response. +type ImportProviderDNSZoneResponse struct { + Records []*Record `json:"records"` +} + +// ImportRawDNSZoneRequest: import raw dns zone request. +type ImportRawDNSZoneRequest struct { + // DNSZone: DNS zone to import. + DNSZone string `json:"-"` + + // Deprecated + Content *string `json:"content,omitempty"` + + ProjectID string `json:"project_id"` + + // Deprecated: Format: default value: unknown_raw_format + Format *RawFormat `json:"format,omitempty"` + + // BindSource: import a bind file format. + // Precisely one of BindSource, AxfrSource must be set. 
+ BindSource *ImportRawDNSZoneRequestBindSource `json:"bind_source,omitempty"` + + // AxfrSource: import from the name server given with TSIG, to use or not. + // Precisely one of BindSource, AxfrSource must be set. + AxfrSource *ImportRawDNSZoneRequestAXFRSource `json:"axfr_source,omitempty"` +} + +// ImportRawDNSZoneResponse: import raw dns zone response. +type ImportRawDNSZoneResponse struct { + Records []*Record `json:"records"` +} + +// ListContactsResponse: list contacts response. +type ListContactsResponse struct { + TotalCount uint32 `json:"total_count"` + + Contacts []*ContactRoles `json:"contacts"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListContactsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListContactsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListContactsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Contacts = append(r.Contacts, results.Contacts...) + r.TotalCount += uint32(len(results.Contacts)) + return uint32(len(results.Contacts)), nil +} + +// ListDNSZoneNameserversRequest: list dns zone nameservers request. +type ListDNSZoneNameserversRequest struct { + // DNSZone: DNS zone on which to filter the returned DNS zone name servers. + DNSZone string `json:"-"` + + // ProjectID: project ID on which to filter the returned DNS zone name servers. + ProjectID *string `json:"-"` +} + +// ListDNSZoneNameserversResponse: list dns zone nameservers response. +type ListDNSZoneNameserversResponse struct { + // Ns: DNS zone name servers returned. + Ns []*Nameserver `json:"ns"` +} + +// ListDNSZoneRecordsRequest: list dns zone records request. +type ListDNSZoneRecordsRequest struct { + // DNSZone: DNS zone on which to filter the returned DNS zone records. + DNSZone string `json:"-"` + + // ProjectID: project ID on which to filter the returned DNS zone records. + ProjectID *string `json:"-"` + + // OrderBy: sort order of the returned DNS zone records. + // Default value: name_asc + OrderBy ListDNSZoneRecordsRequestOrderBy `json:"-"` + + // Page: page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: maximum number of DNS zone records per page. + PageSize *uint32 `json:"-"` + + // Name: name on which to filter the returned DNS zone records. + Name string `json:"-"` + + // Type: record type on which to filter the returned DNS zone records. + // Default value: unknown + Type RecordType `json:"-"` + + // ID: record ID on which to filter the returned DNS zone records. + ID *string `json:"-"` +} + +// ListDNSZoneRecordsResponse: list dns zone records response. +type ListDNSZoneRecordsResponse struct { + // TotalCount: total number of DNS zone records. + TotalCount uint32 `json:"total_count"` + + // Records: paginated returned DNS zone records. + Records []*Record `json:"records"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListDNSZoneRecordsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListDNSZoneRecordsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListDNSZoneRecordsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Records = append(r.Records, results.Records...) 
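+	// Accumulate this page: TotalCount below is increased by the number of
+	// records appended, mirroring the other UnsafeAppend helpers in this file.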
+ r.TotalCount += uint32(len(results.Records)) + return uint32(len(results.Records)), nil +} + +// ListDNSZoneVersionRecordsRequest: list dns zone version records request. +type ListDNSZoneVersionRecordsRequest struct { + DNSZoneVersionID string `json:"-"` + + // Page: page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: maximum number of DNS zones versions records per page. + PageSize *uint32 `json:"-"` +} + +// ListDNSZoneVersionRecordsResponse: list dns zone version records response. +type ListDNSZoneVersionRecordsResponse struct { + // TotalCount: total number of DNS zones versions records. + TotalCount uint32 `json:"total_count"` + + Records []*Record `json:"records"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListDNSZoneVersionRecordsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListDNSZoneVersionRecordsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListDNSZoneVersionRecordsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Records = append(r.Records, results.Records...) + r.TotalCount += uint32(len(results.Records)) + return uint32(len(results.Records)), nil +} + +// ListDNSZoneVersionsRequest: list dns zone versions request. +type ListDNSZoneVersionsRequest struct { + DNSZone string `json:"-"` + + // Page: page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: maximum number of DNS zones versions per page. + PageSize *uint32 `json:"-"` +} + +// ListDNSZoneVersionsResponse: list dns zone versions response. +type ListDNSZoneVersionsResponse struct { + // TotalCount: total number of DNS zones versions. + TotalCount uint32 `json:"total_count"` + + Versions []*DNSZoneVersion `json:"versions"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListDNSZoneVersionsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListDNSZoneVersionsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListDNSZoneVersionsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Versions = append(r.Versions, results.Versions...) + r.TotalCount += uint32(len(results.Versions)) + return uint32(len(results.Versions)), nil +} + +// ListDNSZonesRequest: list dns zones request. +type ListDNSZonesRequest struct { + // OrganizationID: organization ID on which to filter the returned DNS zones. + OrganizationID *string `json:"-"` + + // ProjectID: project ID on which to filter the returned DNS zones. + ProjectID *string `json:"-"` + + // OrderBy: sort order of the returned DNS zones. + // Default value: domain_asc + OrderBy ListDNSZonesRequestOrderBy `json:"-"` + + // Page: page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: maximum number of DNS zones to return per page. + PageSize *uint32 `json:"-"` + + // Domain: domain on which to filter the returned DNS zones. + Domain string `json:"-"` + + // Deprecated: DNSZone: DNS zone on which to filter the returned DNS zones. + DNSZone *string `json:"-"` + + // DNSZones: DNS zones on which to filter the returned DNS zones. + DNSZones []string `json:"-"` + + // CreatedAfter: only list DNS zones created after this date. 
+ CreatedAfter *time.Time `json:"-"` + + // CreatedBefore: only list DNS zones created before this date. + CreatedBefore *time.Time `json:"-"` + + // UpdatedAfter: only list DNS zones updated after this date. + UpdatedAfter *time.Time `json:"-"` + + // UpdatedBefore: only list DNS zones updated before this date. + UpdatedBefore *time.Time `json:"-"` +} + +// ListDNSZonesResponse: list dns zones response. +type ListDNSZonesResponse struct { + // TotalCount: total number of DNS zones matching the requested criteria. + TotalCount uint32 `json:"total_count"` + + // DNSZones: paginated returned DNS zones. + DNSZones []*DNSZone `json:"dns_zones"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListDNSZonesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListDNSZonesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListDNSZonesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.DNSZones = append(r.DNSZones, results.DNSZones...) + r.TotalCount += uint32(len(results.DNSZones)) + return uint32(len(results.DNSZones)), nil +} + +// ListDomainHostsResponse: list domain hosts response. +type ListDomainHostsResponse struct { + TotalCount uint32 `json:"total_count"` + + Hosts []*Host `json:"hosts"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListDomainHostsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListDomainHostsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListDomainHostsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Hosts = append(r.Hosts, results.Hosts...) + r.TotalCount += uint32(len(results.Hosts)) + return uint32(len(results.Hosts)), nil +} + +// ListDomainsResponse: list domains response. +type ListDomainsResponse struct { + TotalCount uint32 `json:"total_count"` + + Domains []*DomainSummary `json:"domains"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListDomainsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListDomainsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListDomainsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Domains = append(r.Domains, results.Domains...) + r.TotalCount += uint32(len(results.Domains)) + return uint32(len(results.Domains)), nil +} + +// ListRenewableDomainsResponse: list renewable domains response. +type ListRenewableDomainsResponse struct { + TotalCount uint32 `json:"total_count"` + + Domains []*RenewableDomain `json:"domains"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListRenewableDomainsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListRenewableDomainsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListRenewableDomainsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Domains = append(r.Domains, results.Domains...) 
+ r.TotalCount += uint32(len(results.Domains)) + return uint32(len(results.Domains)), nil +} + +// ListSSLCertificatesRequest: list ssl certificates request. +type ListSSLCertificatesRequest struct { + DNSZone string `json:"-"` + + Page *int32 `json:"-"` + + PageSize *uint32 `json:"-"` + + ProjectID *string `json:"-"` +} + +// ListSSLCertificatesResponse: list ssl certificates response. +type ListSSLCertificatesResponse struct { + TotalCount uint32 `json:"total_count"` + + Certificates []*SSLCertificate `json:"certificates"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListSSLCertificatesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListSSLCertificatesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListSSLCertificatesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Certificates = append(r.Certificates, results.Certificates...) + r.TotalCount += uint32(len(results.Certificates)) + return uint32(len(results.Certificates)), nil +} + +// ListTasksResponse: list tasks response. +type ListTasksResponse struct { + TotalCount uint32 `json:"total_count"` + + Tasks []*Task `json:"tasks"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListTasksResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListTasksResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListTasksResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Tasks = append(r.Tasks, results.Tasks...) + r.TotalCount += uint32(len(results.Tasks)) + return uint32(len(results.Tasks)), nil +} + +// ListTldsResponse: list tlds response. +type ListTldsResponse struct { + // Tlds: array of TLDs. + Tlds []*Tld `json:"tlds"` + + // TotalCount: total count of TLDs returned. + TotalCount uint64 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListTldsResponse) UnsafeGetTotalCount() uint64 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListTldsResponse) UnsafeAppend(res interface{}) (uint64, error) { + results, ok := res.(*ListTldsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Tlds = append(r.Tlds, results.Tlds...) + r.TotalCount += uint64(len(results.Tlds)) + return uint64(len(results.Tlds)), nil +} + +// OrderResponse: order response. +type OrderResponse struct { + Domains []string `json:"domains"` + + OrganizationID string `json:"organization_id"` + + ProjectID string `json:"project_id"` + + TaskID string `json:"task_id"` + + CreatedAt *time.Time `json:"created_at"` +} + +// RefreshDNSZoneRequest: refresh dns zone request. +type RefreshDNSZoneRequest struct { + // DNSZone: DNS zone to refresh. + DNSZone string `json:"-"` + + // RecreateDNSZone: specifies whether or not to recreate the DNS zone. + RecreateDNSZone bool `json:"recreate_dns_zone"` + + // RecreateSubDNSZone: specifies whether or not to recreate the sub DNS zone. + RecreateSubDNSZone bool `json:"recreate_sub_dns_zone"` +} + +// RefreshDNSZoneResponse: refresh dns zone response. +type RefreshDNSZoneResponse struct { + // DNSZones: DNS zones returned. 
+ DNSZones []*DNSZone `json:"dns_zones"` +} + +// RegisterExternalDomainResponse: register external domain response. +type RegisterExternalDomainResponse struct { + Domain string `json:"domain"` + + OrganizationID string `json:"organization_id"` + + ValidationToken string `json:"validation_token"` + + CreatedAt *time.Time `json:"created_at"` + + ProjectID string `json:"project_id"` +} + +// RegistrarAPIBuyDomainsRequest: registrar api buy domains request. +type RegistrarAPIBuyDomainsRequest struct { + Domains []string `json:"domains"` + + DurationInYears uint32 `json:"duration_in_years"` + + ProjectID string `json:"project_id"` + + // Precisely one of OwnerContactID, OwnerContact must be set. + OwnerContactID *string `json:"owner_contact_id,omitempty"` + + // Precisely one of OwnerContactID, OwnerContact must be set. + OwnerContact *NewContact `json:"owner_contact,omitempty"` + + // Precisely one of AdministrativeContactID, AdministrativeContact must be set. + AdministrativeContactID *string `json:"administrative_contact_id,omitempty"` + + // Precisely one of AdministrativeContactID, AdministrativeContact must be set. + AdministrativeContact *NewContact `json:"administrative_contact,omitempty"` + + // Precisely one of TechnicalContactID, TechnicalContact must be set. + TechnicalContactID *string `json:"technical_contact_id,omitempty"` + + // Precisely one of TechnicalContactID, TechnicalContact must be set. + TechnicalContact *NewContact `json:"technical_contact,omitempty"` +} + +// RegistrarAPICheckContactsCompatibilityRequest: registrar api check contacts compatibility request. +type RegistrarAPICheckContactsCompatibilityRequest struct { + Domains []string `json:"domains"` + + Tlds []string `json:"tlds"` + + // Precisely one of OwnerContactID, OwnerContact must be set. + OwnerContactID *string `json:"owner_contact_id,omitempty"` + + // Precisely one of OwnerContactID, OwnerContact must be set. + OwnerContact *NewContact `json:"owner_contact,omitempty"` + + // Precisely one of AdministrativeContactID, AdministrativeContact must be set. + AdministrativeContactID *string `json:"administrative_contact_id,omitempty"` + + // Precisely one of AdministrativeContactID, AdministrativeContact must be set. + AdministrativeContact *NewContact `json:"administrative_contact,omitempty"` + + // Precisely one of TechnicalContactID, TechnicalContact must be set. + TechnicalContactID *string `json:"technical_contact_id,omitempty"` + + // Precisely one of TechnicalContactID, TechnicalContact must be set. + TechnicalContact *NewContact `json:"technical_contact,omitempty"` +} + +// RegistrarAPICreateDomainHostRequest: registrar api create domain host request. +type RegistrarAPICreateDomainHostRequest struct { + Domain string `json:"-"` + + Name string `json:"name"` + + IPs []net.IP `json:"ips"` +} + +// RegistrarAPIDeleteDomainHostRequest: registrar api delete domain host request. +type RegistrarAPIDeleteDomainHostRequest struct { + Domain string `json:"-"` + + Name string `json:"-"` +} + +// RegistrarAPIDeleteExternalDomainRequest: registrar api delete external domain request. +type RegistrarAPIDeleteExternalDomainRequest struct { + Domain string `json:"-"` +} + +// RegistrarAPIDisableDomainAutoRenewRequest: registrar api disable domain auto renew request. +type RegistrarAPIDisableDomainAutoRenewRequest struct { + Domain string `json:"-"` +} + +// RegistrarAPIDisableDomainDNSSECRequest: registrar api disable domain dnssec request. 
+type RegistrarAPIDisableDomainDNSSECRequest struct { + Domain string `json:"-"` +} + +// RegistrarAPIEnableDomainAutoRenewRequest: registrar api enable domain auto renew request. +type RegistrarAPIEnableDomainAutoRenewRequest struct { + Domain string `json:"-"` +} + +// RegistrarAPIEnableDomainDNSSECRequest: registrar api enable domain dnssec request. +type RegistrarAPIEnableDomainDNSSECRequest struct { + Domain string `json:"-"` + + DsRecord *DSRecord `json:"ds_record,omitempty"` +} + +// RegistrarAPIGetContactRequest: registrar api get contact request. +type RegistrarAPIGetContactRequest struct { + ContactID string `json:"-"` +} + +// RegistrarAPIGetDomainAuthCodeRequest: registrar api get domain auth code request. +type RegistrarAPIGetDomainAuthCodeRequest struct { + Domain string `json:"-"` +} + +// RegistrarAPIGetDomainRequest: registrar api get domain request. +type RegistrarAPIGetDomainRequest struct { + Domain string `json:"-"` +} + +// RegistrarAPIListContactsRequest: registrar api list contacts request. +type RegistrarAPIListContactsRequest struct { + Page *int32 `json:"-"` + + PageSize *uint32 `json:"-"` + + Domain *string `json:"-"` + + ProjectID *string `json:"-"` + + OrganizationID *string `json:"-"` + + // Role: default value: unknown_role + Role ListContactsRequestRole `json:"-"` + + // EmailStatus: default value: email_status_unknown + EmailStatus ContactEmailStatus `json:"-"` +} + +// RegistrarAPIListDomainHostsRequest: registrar api list domain hosts request. +type RegistrarAPIListDomainHostsRequest struct { + Domain string `json:"-"` + + Page *int32 `json:"-"` + + PageSize *uint32 `json:"-"` +} + +// RegistrarAPIListDomainsRequest: registrar api list domains request. +type RegistrarAPIListDomainsRequest struct { + Page *int32 `json:"-"` + + PageSize *uint32 `json:"-"` + + // OrderBy: default value: domain_asc + OrderBy ListDomainsRequestOrderBy `json:"-"` + + Registrar *string `json:"-"` + + // Status: default value: status_unknown + Status DomainStatus `json:"-"` + + ProjectID *string `json:"-"` + + OrganizationID *string `json:"-"` + + IsExternal *bool `json:"-"` + + Domain *string `json:"-"` +} + +// RegistrarAPIListRenewableDomainsRequest: registrar api list renewable domains request. +type RegistrarAPIListRenewableDomainsRequest struct { + Page *int32 `json:"-"` + + PageSize *uint32 `json:"-"` + + // OrderBy: default value: domain_asc + OrderBy ListRenewableDomainsRequestOrderBy `json:"-"` + + ProjectID *string `json:"-"` + + OrganizationID *string `json:"-"` +} + +// RegistrarAPIListTasksRequest: registrar api list tasks request. +type RegistrarAPIListTasksRequest struct { + Page *int32 `json:"-"` + + PageSize *uint32 `json:"-"` + + ProjectID *string `json:"-"` + + OrganizationID *string `json:"-"` + + Domain *string `json:"-"` + + Types []TaskType `json:"-"` + + Statuses []TaskStatus `json:"-"` + + // OrderBy: default value: domain_desc + OrderBy ListTasksRequestOrderBy `json:"-"` +} + +// RegistrarAPIListTldsRequest: registrar api list tlds request. +type RegistrarAPIListTldsRequest struct { + // Tlds: array of TLDs to return. + Tlds []string `json:"-"` + + // Page: page number for the returned Projects. + Page *int32 `json:"-"` + + // PageSize: maximum number of Project per page. + PageSize *uint32 `json:"-"` + + // OrderBy: sort order of the returned TLDs. + // Default value: name_asc + OrderBy ListTldsRequestOrderBy `json:"-"` +} + +// RegistrarAPILockDomainTransferRequest: registrar api lock domain transfer request. 
+type RegistrarAPILockDomainTransferRequest struct { + Domain string `json:"-"` +} + +// RegistrarAPIRegisterExternalDomainRequest: registrar api register external domain request. +type RegistrarAPIRegisterExternalDomainRequest struct { + Domain string `json:"domain"` + + ProjectID string `json:"project_id"` +} + +// RegistrarAPIRenewDomainsRequest: registrar api renew domains request. +type RegistrarAPIRenewDomainsRequest struct { + Domains []string `json:"domains"` + + DurationInYears uint32 `json:"duration_in_years"` + + ForceLateRenewal *bool `json:"force_late_renewal,omitempty"` +} + +// RegistrarAPISearchAvailableDomainsRequest: registrar api search available domains request. +type RegistrarAPISearchAvailableDomainsRequest struct { + // Domains: a list of domain to search, TLD is optional. + Domains []string `json:"-"` + + // Tlds: array of tlds to search on. + Tlds []string `json:"-"` + + // StrictSearch: search exact match. + StrictSearch bool `json:"-"` +} + +// RegistrarAPITradeDomainRequest: registrar api trade domain request. +type RegistrarAPITradeDomainRequest struct { + Domain string `json:"-"` + + ProjectID *string `json:"project_id,omitempty"` + + // Precisely one of NewOwnerContactID, NewOwnerContact must be set. + NewOwnerContactID *string `json:"new_owner_contact_id,omitempty"` + + // Precisely one of NewOwnerContactID, NewOwnerContact must be set. + NewOwnerContact *NewContact `json:"new_owner_contact,omitempty"` +} + +// RegistrarAPITransferInDomainRequest: registrar api transfer in domain request. +type RegistrarAPITransferInDomainRequest struct { + Domains []*TransferInDomainRequestTransferRequest `json:"domains"` + + ProjectID string `json:"project_id"` + + // Precisely one of OwnerContactID, OwnerContact must be set. + OwnerContactID *string `json:"owner_contact_id,omitempty"` + + // Precisely one of OwnerContactID, OwnerContact must be set. + OwnerContact *NewContact `json:"owner_contact,omitempty"` + + // Precisely one of AdministrativeContactID, AdministrativeContact must be set. + AdministrativeContactID *string `json:"administrative_contact_id,omitempty"` + + // Precisely one of AdministrativeContactID, AdministrativeContact must be set. + AdministrativeContact *NewContact `json:"administrative_contact,omitempty"` + + // Precisely one of TechnicalContactID, TechnicalContact must be set. + TechnicalContactID *string `json:"technical_contact_id,omitempty"` + + // Precisely one of TechnicalContactID, TechnicalContact must be set. + TechnicalContact *NewContact `json:"technical_contact,omitempty"` +} + +// RegistrarAPIUnlockDomainTransferRequest: registrar api unlock domain transfer request. +type RegistrarAPIUnlockDomainTransferRequest struct { + Domain string `json:"-"` +} + +// RegistrarAPIUpdateContactRequest: registrar api update contact request. 
+type RegistrarAPIUpdateContactRequest struct { + ContactID string `json:"-"` + + Email *string `json:"email,omitempty"` + + EmailAlt *string `json:"email_alt,omitempty"` + + PhoneNumber *string `json:"phone_number,omitempty"` + + FaxNumber *string `json:"fax_number,omitempty"` + + AddressLine1 *string `json:"address_line_1,omitempty"` + + AddressLine2 *string `json:"address_line_2,omitempty"` + + Zip *string `json:"zip,omitempty"` + + City *string `json:"city,omitempty"` + + Country *string `json:"country,omitempty"` + + VatIDentificationCode *string `json:"vat_identification_code,omitempty"` + + CompanyIDentificationCode *string `json:"company_identification_code,omitempty"` + + // Lang: default value: unknown_language_code + Lang std.LanguageCode `json:"lang"` + + Resale *bool `json:"resale,omitempty"` + + // Deprecated + Questions *[]*UpdateContactRequestQuestion `json:"questions,omitempty"` + + ExtensionFr *ContactExtensionFR `json:"extension_fr,omitempty"` + + ExtensionEu *ContactExtensionEU `json:"extension_eu,omitempty"` + + WhoisOptIn *bool `json:"whois_opt_in,omitempty"` + + State *string `json:"state,omitempty"` + + ExtensionNl *ContactExtensionNL `json:"extension_nl,omitempty"` +} + +// RegistrarAPIUpdateDomainHostRequest: registrar api update domain host request. +type RegistrarAPIUpdateDomainHostRequest struct { + Domain string `json:"-"` + + Name string `json:"-"` + + IPs *[]string `json:"ips,omitempty"` +} + +// RegistrarAPIUpdateDomainRequest: registrar api update domain request. +type RegistrarAPIUpdateDomainRequest struct { + Domain string `json:"-"` + + // Precisely one of TechnicalContactID, TechnicalContact must be set. + TechnicalContactID *string `json:"technical_contact_id,omitempty"` + + // Precisely one of TechnicalContactID, TechnicalContact must be set. + TechnicalContact *NewContact `json:"technical_contact,omitempty"` + + // Deprecated + // Precisely one of OwnerContactID, OwnerContact must be set. + OwnerContactID *string `json:"owner_contact_id,omitempty"` + + // Deprecated + // Precisely one of OwnerContactID, OwnerContact must be set. + OwnerContact *NewContact `json:"owner_contact,omitempty"` + + // Precisely one of AdministrativeContactID, AdministrativeContact must be set. + AdministrativeContactID *string `json:"administrative_contact_id,omitempty"` + + // Precisely one of AdministrativeContactID, AdministrativeContact must be set. + AdministrativeContact *NewContact `json:"administrative_contact,omitempty"` +} + +// RestoreDNSZoneVersionRequest: restore dns zone version request. +type RestoreDNSZoneVersionRequest struct { + DNSZoneVersionID string `json:"-"` +} + +// RestoreDNSZoneVersionResponse: restore dns zone version response. +type RestoreDNSZoneVersionResponse struct { +} + +// SearchAvailableDomainsResponse: search available domains response. +type SearchAvailableDomainsResponse struct { + // AvailableDomains: array of available domains. + AvailableDomains []*AvailableDomain `json:"available_domains"` +} + +// UpdateDNSZoneNameserversRequest: update dns zone nameservers request. +type UpdateDNSZoneNameserversRequest struct { + // DNSZone: DNS zone in which to update the DNS zone name servers. + DNSZone string `json:"-"` + + // Ns: new DNS zone name servers. + Ns []*Nameserver `json:"ns"` +} + // UpdateDNSZoneNameserversResponse: update dns zone nameservers response. type UpdateDNSZoneNameserversResponse struct { // Ns: DNS zone name servers returned. 
Ns []*Nameserver `json:"ns"` } +// UpdateDNSZoneRecordsRequest: update dns zone records request. +type UpdateDNSZoneRecordsRequest struct { + // DNSZone: DNS zone in which to update the DNS zone records. + DNSZone string `json:"-"` + + // Changes: changes made to the records. + Changes []*RecordChange `json:"changes"` + + // ReturnAllRecords: specifies whether or not to return all the records. + ReturnAllRecords *bool `json:"return_all_records,omitempty"` + + // DisallowNewZoneCreation: disable the creation of the target zone if it does not exist. Target zone creation is disabled by default. + DisallowNewZoneCreation bool `json:"disallow_new_zone_creation"` + + // Serial: use the provided serial (0) instead of the auto-increment serial. + Serial *uint64 `json:"serial,omitempty"` +} + // UpdateDNSZoneRecordsResponse: update dns zone records response. type UpdateDNSZoneRecordsResponse struct { // Records: DNS zone records returned. Records []*Record `json:"records"` } -// Service API +// UpdateDNSZoneRequest: update dns zone request. +type UpdateDNSZoneRequest struct { + // DNSZone: DNS zone to update. + DNSZone string `json:"-"` -type ListDNSZonesRequest struct { - // OrganizationID: organization ID on which to filter the returned DNS zones. - OrganizationID *string `json:"-"` - // ProjectID: project ID on which to filter the returned DNS zones. - ProjectID *string `json:"-"` - // OrderBy: sort order of the returned DNS zones. - // Default value: domain_asc - OrderBy ListDNSZonesRequestOrderBy `json:"-"` - // Page: page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: maximum number of DNS zones to return per page. - PageSize *uint32 `json:"-"` - // Domain: domain on which to filter the returned DNS zones. - Domain string `json:"-"` - // Deprecated: DNSZone: DNS zone on which to filter the returned DNS zones. - DNSZone *string `json:"-"` - // DNSZones: DNS zones on which to filter the returned DNS zones. - DNSZones []string `json:"-"` - // CreatedAfter: only list DNS zones created after this date. - CreatedAfter *time.Time `json:"-"` - // CreatedBefore: only list DNS zones created before this date. - CreatedBefore *time.Time `json:"-"` - // UpdatedAfter: only list DNS zones updated after this date. - UpdatedAfter *time.Time `json:"-"` - // UpdatedBefore: only list DNS zones updated before this date. - UpdatedBefore *time.Time `json:"-"` + // NewDNSZone: name of the new DNS zone to create. + NewDNSZone *string `json:"new_dns_zone,omitempty"` + + // ProjectID: project ID in which to create the new DNS zone. + ProjectID string `json:"project_id"` } -// ListDNSZones: list DNS zones. -// Retrieve the list of DNS zones you can manage and filter DNS zones associated with specific domain names. +// Manage your domains, DNS zones and records with the Domains and DNS API. +type API struct { + client *scw.Client +} + +// NewAPI returns a API object from a Scaleway client. +func NewAPI(client *scw.Client) *API { + return &API{ + client: client, + } +} + +// ListDNSZones: Retrieve the list of DNS zones you can manage and filter DNS zones associated with specific domain names. 
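For reference, a minimal usage sketch of the client surface the hunk above introduces (the API struct, its NewAPI constructor, and ListDNSZones); it is not part of the vendored file. The import paths, scw.NewClient, scw.WithAuth, scw.WithDefaultProjectID, and scw.WithAllPages come from the scw package rather than this hunk, and the credentials and IDs are placeholders. WithAllPages is the option that hooks into the UnsafeGetTotalCount / UnsafeAppend helpers generated earlier in this file.

package main

import (
	"fmt"

	domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// Client construction comes from the scw package, not this file;
	// the access key, secret key, and project ID below are placeholders.
	client, err := scw.NewClient(
		scw.WithAuth("SCWXXXXXXXXXXXXXXXXX", "11111111-1111-1111-1111-111111111111"),
		scw.WithDefaultProjectID("11111111-1111-1111-1111-111111111111"),
	)
	if err != nil {
		panic(err)
	}

	api := domain.NewAPI(client)

	// List DNS zones filtered by domain; WithAllPages drives the
	// UnsafeAppend / UnsafeGetTotalCount pagination helpers above.
	zones, err := api.ListDNSZones(&domain.ListDNSZonesRequest{
		Domain: "example.com",
	}, scw.WithAllPages())
	if err != nil {
		panic(err)
	}

	fmt.Printf("%d zone(s)\n", zones.TotalCount)
}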
func (s *API) ListDNSZones(req *ListDNSZonesRequest, opts ...scw.RequestOption) (*ListDNSZonesResponse, error) { var err error @@ -1956,10 +2887,9 @@ func (s *API) ListDNSZones(req *ListDNSZonesRequest, opts ...scw.RequestOption) parameter.AddToQuery(query, "updated_before", req.UpdatedBefore) scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/dns-zones", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/dns-zones", + Query: query, } var resp ListDNSZonesResponse @@ -1971,17 +2901,7 @@ func (s *API) ListDNSZones(req *ListDNSZonesRequest, opts ...scw.RequestOption) return &resp, nil } -type CreateDNSZoneRequest struct { - // Domain: domain in which to crreate the DNS zone. - Domain string `json:"domain"` - // Subdomain: subdomain of the DNS zone to create. - Subdomain string `json:"subdomain"` - // ProjectID: project ID in which to create the DNS zone. - ProjectID string `json:"project_id"` -} - -// CreateDNSZone: create a DNS zone. -// Create a new DNS zone specified by the domain name, the subdomain and the Project ID. +// CreateDNSZone: Create a new DNS zone specified by the domain name, the subdomain and the Project ID. func (s *API) CreateDNSZone(req *CreateDNSZoneRequest, opts ...scw.RequestOption) (*DNSZone, error) { var err error @@ -1991,9 +2911,8 @@ func (s *API) CreateDNSZone(req *CreateDNSZoneRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/dns-zones", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/dns-zones", } err = scwReq.SetBody(req) @@ -2010,17 +2929,7 @@ func (s *API) CreateDNSZone(req *CreateDNSZoneRequest, opts ...scw.RequestOption return &resp, nil } -type UpdateDNSZoneRequest struct { - // DNSZone: DNS zone to update. - DNSZone string `json:"-"` - // NewDNSZone: name of the new DNS zone to create. - NewDNSZone *string `json:"new_dns_zone"` - // ProjectID: project ID in which to create the new DNS zone. - ProjectID string `json:"project_id"` -} - -// UpdateDNSZone: update a DNS zone. -// Update the name and/or the Organizations for a DNS zone. +// UpdateDNSZone: Update the name and/or the Organizations for a DNS zone. func (s *API) UpdateDNSZone(req *UpdateDNSZoneRequest, opts ...scw.RequestOption) (*DNSZone, error) { var err error @@ -2034,9 +2943,8 @@ func (s *API) UpdateDNSZone(req *UpdateDNSZoneRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "", } err = scwReq.SetBody(req) @@ -2053,19 +2961,7 @@ func (s *API) UpdateDNSZone(req *UpdateDNSZoneRequest, opts ...scw.RequestOption return &resp, nil } -type CloneDNSZoneRequest struct { - // DNSZone: DNS zone to clone. - DNSZone string `json:"-"` - // DestDNSZone: destination DNS zone in which to clone the chosen DNS zone. - DestDNSZone string `json:"dest_dns_zone"` - // Overwrite: specifies whether or not the destination DNS zone will be overwritten. - Overwrite bool `json:"overwrite"` - // ProjectID: project ID of the destination DNS zone. - ProjectID *string `json:"project_id"` -} - -// CloneDNSZone: clone a DNS zone. -// Clone an existing DNS zone with all its records into a new DNS zone. +// CloneDNSZone: Clone an existing DNS zone with all its records into a new DNS zone. 
func (s *API) CloneDNSZone(req *CloneDNSZoneRequest, opts ...scw.RequestOption) (*DNSZone, error) { var err error @@ -2074,9 +2970,8 @@ func (s *API) CloneDNSZone(req *CloneDNSZoneRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/clone", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/clone", } err = scwReq.SetBody(req) @@ -2093,15 +2988,7 @@ func (s *API) CloneDNSZone(req *CloneDNSZoneRequest, opts ...scw.RequestOption) return &resp, nil } -type DeleteDNSZoneRequest struct { - // DNSZone: DNS zone to delete. - DNSZone string `json:"-"` - // ProjectID: project ID of the DNS zone to delete. - ProjectID string `json:"-"` -} - -// DeleteDNSZone: delete a DNS zone. -// Delete a DNS zone and all its records. +// DeleteDNSZone: Delete a DNS zone and all its records. func (s *API) DeleteDNSZone(req *DeleteDNSZoneRequest, opts ...scw.RequestOption) (*DeleteDNSZoneResponse, error) { var err error @@ -2118,10 +3005,9 @@ func (s *API) DeleteDNSZone(req *DeleteDNSZoneRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "", - Query: query, - Headers: http.Header{}, + Method: "DELETE", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "", + Query: query, } var resp DeleteDNSZoneResponse @@ -2133,29 +3019,7 @@ func (s *API) DeleteDNSZone(req *DeleteDNSZoneRequest, opts ...scw.RequestOption return &resp, nil } -type ListDNSZoneRecordsRequest struct { - // DNSZone: DNS zone on which to filter the returned DNS zone records. - DNSZone string `json:"-"` - // ProjectID: project ID on which to filter the returned DNS zone records. - ProjectID *string `json:"-"` - // OrderBy: sort order of the returned DNS zone records. - // Default value: name_asc - OrderBy ListDNSZoneRecordsRequestOrderBy `json:"-"` - // Page: page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: maximum number of DNS zone records per page. - PageSize *uint32 `json:"-"` - // Name: name on which to filter the returned DNS zone records. - Name string `json:"-"` - // Type: record type on which to filter the returned DNS zone records. - // Default value: unknown - Type RecordType `json:"-"` - // ID: record ID on which to filter the returned DNS zone records. - ID *string `json:"-"` -} - -// ListDNSZoneRecords: list records within a DNS zone. -// Retrieve a list of DNS records within a DNS zone that has default name servers. +// ListDNSZoneRecords: Retrieve a list of DNS records within a DNS zone that has default name servers. // You can filter records by type and name. 
func (s *API) ListDNSZoneRecords(req *ListDNSZoneRecordsRequest, opts ...scw.RequestOption) (*ListDNSZoneRecordsResponse, error) { var err error @@ -2179,10 +3043,9 @@ func (s *API) ListDNSZoneRecords(req *ListDNSZoneRecordsRequest, opts ...scw.Req } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/records", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/records", + Query: query, } var resp ListDNSZoneRecordsResponse @@ -2194,21 +3057,7 @@ func (s *API) ListDNSZoneRecords(req *ListDNSZoneRecordsRequest, opts ...scw.Req return &resp, nil } -type UpdateDNSZoneRecordsRequest struct { - // DNSZone: DNS zone in which to update the DNS zone records. - DNSZone string `json:"-"` - // Changes: changes made to the records. - Changes []*RecordChange `json:"changes"` - // ReturnAllRecords: specifies whether or not to return all the records. - ReturnAllRecords *bool `json:"return_all_records"` - // DisallowNewZoneCreation: disable the creation of the target zone if it does not exist. Target zone creation is disabled by default. - DisallowNewZoneCreation bool `json:"disallow_new_zone_creation"` - // Serial: use the provided serial (0) instead of the auto-increment serial. - Serial *uint64 `json:"serial"` -} - -// UpdateDNSZoneRecords: update records within a DNS zone. -// Update records within a DNS zone that has default name servers and perform several actions on your records. +// UpdateDNSZoneRecords: Update records within a DNS zone that has default name servers and perform several actions on your records. // // Actions include: // - add: allows you to add a new record or add a new IP to an existing A record, for example @@ -2225,9 +3074,8 @@ func (s *API) UpdateDNSZoneRecords(req *UpdateDNSZoneRecordsRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/records", - Headers: http.Header{}, + Method: "PATCH", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/records", } err = scwReq.SetBody(req) @@ -2244,15 +3092,7 @@ func (s *API) UpdateDNSZoneRecords(req *UpdateDNSZoneRecordsRequest, opts ...scw return &resp, nil } -type ListDNSZoneNameserversRequest struct { - // DNSZone: DNS zone on which to filter the returned DNS zone name servers. - DNSZone string `json:"-"` - // ProjectID: project ID on which to filter the returned DNS zone name servers. - ProjectID *string `json:"-"` -} - -// ListDNSZoneNameservers: list name servers within a DNS zone. -// Retrieve a list of name servers within a DNS zone and their optional glue records. +// ListDNSZoneNameservers: Retrieve a list of name servers within a DNS zone and their optional glue records. func (s *API) ListDNSZoneNameservers(req *ListDNSZoneNameserversRequest, opts ...scw.RequestOption) (*ListDNSZoneNameserversResponse, error) { var err error @@ -2264,10 +3104,9 @@ func (s *API) ListDNSZoneNameservers(req *ListDNSZoneNameserversRequest, opts .. } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/nameservers", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/nameservers", + Query: query, } var resp ListDNSZoneNameserversResponse @@ -2279,15 +3118,7 @@ func (s *API) ListDNSZoneNameservers(req *ListDNSZoneNameserversRequest, opts .. 
return &resp, nil } -type UpdateDNSZoneNameserversRequest struct { - // DNSZone: DNS zone in which to update the DNS zone name servers. - DNSZone string `json:"-"` - // Ns: new DNS zone name servers. - Ns []*Nameserver `json:"ns"` -} - -// UpdateDNSZoneNameservers: update name servers within a DNS zone. -// Update name servers within a DNS zone and set optional glue records. +// UpdateDNSZoneNameservers: Update name servers within a DNS zone and set optional glue records. func (s *API) UpdateDNSZoneNameservers(req *UpdateDNSZoneNameserversRequest, opts ...scw.RequestOption) (*UpdateDNSZoneNameserversResponse, error) { var err error @@ -2296,9 +3127,8 @@ func (s *API) UpdateDNSZoneNameservers(req *UpdateDNSZoneNameserversRequest, opt } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/nameservers", - Headers: http.Header{}, + Method: "PUT", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/nameservers", } err = scwReq.SetBody(req) @@ -2315,13 +3145,7 @@ func (s *API) UpdateDNSZoneNameservers(req *UpdateDNSZoneNameserversRequest, opt return &resp, nil } -type ClearDNSZoneRecordsRequest struct { - // DNSZone: DNS zone to clear. - DNSZone string `json:"-"` -} - -// ClearDNSZoneRecords: clear records within a DNS zone. -// Delete all records within a DNS zone that has default name servers.
+// ClearDNSZoneRecords: Delete all records within a DNS zone that has default name servers.
// All edits will be versioned. func (s *API) ClearDNSZoneRecords(req *ClearDNSZoneRecordsRequest, opts ...scw.RequestOption) (*ClearDNSZoneRecordsResponse, error) { var err error @@ -2331,9 +3155,8 @@ func (s *API) ClearDNSZoneRecords(req *ClearDNSZoneRecordsRequest, opts ...scw.R } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/records", - Headers: http.Header{}, + Method: "DELETE", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/records", } var resp ClearDNSZoneRecordsResponse @@ -2345,16 +3168,7 @@ func (s *API) ClearDNSZoneRecords(req *ClearDNSZoneRecordsRequest, opts ...scw.R return &resp, nil } -type ExportRawDNSZoneRequest struct { - // DNSZone: DNS zone to export. - DNSZone string `json:"-"` - // Format: DNS zone format. - // Default value: bind - Format RawFormat `json:"-"` -} - -// ExportRawDNSZone: export a raw DNS zone. -// Export a DNS zone with default name servers, in a specific format. +// ExportRawDNSZone: Export a DNS zone with default name servers, in a specific format. func (s *API) ExportRawDNSZone(req *ExportRawDNSZoneRequest, opts ...scw.RequestOption) (*scw.File, error) { var err error @@ -2366,10 +3180,9 @@ func (s *API) ExportRawDNSZone(req *ExportRawDNSZoneRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/raw", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/raw", + Query: query, } var resp scw.File @@ -2381,25 +3194,7 @@ func (s *API) ExportRawDNSZone(req *ExportRawDNSZoneRequest, opts ...scw.Request return &resp, nil } -type ImportRawDNSZoneRequest struct { - // DNSZone: DNS zone to import. - DNSZone string `json:"-"` - // Deprecated - Content *string `json:"content,omitempty"` - - ProjectID string `json:"project_id"` - // Deprecated: Format: default value: unknown_raw_format - Format *RawFormat `json:"format,omitempty"` - // BindSource: import a bind file format. - // Precisely one of AxfrSource, BindSource must be set. - BindSource *ImportRawDNSZoneRequestBindSource `json:"bind_source,omitempty"` - // AxfrSource: import from the name server given with TSIG, to use or not. - // Precisely one of AxfrSource, BindSource must be set. - AxfrSource *ImportRawDNSZoneRequestAXFRSource `json:"axfr_source,omitempty"` -} - -// ImportRawDNSZone: import a raw DNS zone. -// Import and replace the format of records from a given provider, with default name servers. +// ImportRawDNSZone: Import and replace the format of records from a given provider, with default name servers. func (s *API) ImportRawDNSZone(req *ImportRawDNSZoneRequest, opts ...scw.RequestOption) (*ImportRawDNSZoneResponse, error) { var err error @@ -2413,9 +3208,8 @@ func (s *API) ImportRawDNSZone(req *ImportRawDNSZoneRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/raw", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/raw", } err = scwReq.SetBody(req) @@ -2432,15 +3226,7 @@ func (s *API) ImportRawDNSZone(req *ImportRawDNSZoneRequest, opts ...scw.Request return &resp, nil } -type ImportProviderDNSZoneRequest struct { - DNSZone string `json:"-"` - - // Precisely one of OnlineV1 must be set. 
- OnlineV1 *ImportProviderDNSZoneRequestOnlineV1 `json:"online_v1,omitempty"` -} - -// ImportProviderDNSZone: import a DNS zone from another provider. -// Import and replace the format of records from a given provider, with default name servers. +// ImportProviderDNSZone: Import and replace the format of records from a given provider, with default name servers. func (s *API) ImportProviderDNSZone(req *ImportProviderDNSZoneRequest, opts ...scw.RequestOption) (*ImportProviderDNSZoneResponse, error) { var err error @@ -2449,9 +3235,8 @@ func (s *API) ImportProviderDNSZone(req *ImportProviderDNSZoneRequest, opts ...s } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/import-provider", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/import-provider", } err = scwReq.SetBody(req) @@ -2468,17 +3253,7 @@ func (s *API) ImportProviderDNSZone(req *ImportProviderDNSZoneRequest, opts ...s return &resp, nil } -type RefreshDNSZoneRequest struct { - // DNSZone: DNS zone to refresh. - DNSZone string `json:"-"` - // RecreateDNSZone: specifies whether or not to recreate the DNS zone. - RecreateDNSZone bool `json:"recreate_dns_zone"` - // RecreateSubDNSZone: specifies whether or not to recreate the sub DNS zone. - RecreateSubDNSZone bool `json:"recreate_sub_dns_zone"` -} - -// RefreshDNSZone: refresh a DNS zone. -// Refresh an SOA DNS zone to reload the records in the DNS zone and update the SOA serial. +// RefreshDNSZone: Refresh an SOA DNS zone to reload the records in the DNS zone and update the SOA serial. // You can recreate the given DNS zone and its sub DNS zone if needed. func (s *API) RefreshDNSZone(req *RefreshDNSZoneRequest, opts ...scw.RequestOption) (*RefreshDNSZoneResponse, error) { var err error @@ -2488,9 +3263,8 @@ func (s *API) RefreshDNSZone(req *RefreshDNSZoneRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/refresh", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/refresh", } err = scwReq.SetBody(req) @@ -2507,16 +3281,7 @@ func (s *API) RefreshDNSZone(req *RefreshDNSZoneRequest, opts ...scw.RequestOpti return &resp, nil } -type ListDNSZoneVersionsRequest struct { - DNSZone string `json:"-"` - // Page: page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: maximum number of DNS zones versions per page. - PageSize *uint32 `json:"-"` -} - -// ListDNSZoneVersions: list versions of a DNS zone. -// Retrieve a list of a DNS zone's versions.
+// ListDNSZoneVersions: Retrieve a list of a DNS zone's versions.
// The maximum version count is 100. If the count reaches this limit, the oldest version will be deleted after each new modification. func (s *API) ListDNSZoneVersions(req *ListDNSZoneVersionsRequest, opts ...scw.RequestOption) (*ListDNSZoneVersionsResponse, error) { var err error @@ -2535,10 +3300,9 @@ func (s *API) ListDNSZoneVersions(req *ListDNSZoneVersionsRequest, opts ...scw.R } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/versions", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/versions", + Query: query, } var resp ListDNSZoneVersionsResponse @@ -2550,16 +3314,7 @@ func (s *API) ListDNSZoneVersions(req *ListDNSZoneVersionsRequest, opts ...scw.R return &resp, nil } -type ListDNSZoneVersionRecordsRequest struct { - DNSZoneVersionID string `json:"-"` - // Page: page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: maximum number of DNS zones versions records per page. - PageSize *uint32 `json:"-"` -} - -// ListDNSZoneVersionRecords: list records from a given version of a specific DNS zone. -// Retrieve a list of records from a specific DNS zone version. +// ListDNSZoneVersionRecords: Retrieve a list of records from a specific DNS zone version. func (s *API) ListDNSZoneVersionRecords(req *ListDNSZoneVersionRecordsRequest, opts ...scw.RequestOption) (*ListDNSZoneVersionRecordsResponse, error) { var err error @@ -2577,10 +3332,9 @@ func (s *API) ListDNSZoneVersionRecords(req *ListDNSZoneVersionRecordsRequest, o } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/dns-zones/version/" + fmt.Sprint(req.DNSZoneVersionID) + "", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/dns-zones/version/" + fmt.Sprint(req.DNSZoneVersionID) + "", + Query: query, } var resp ListDNSZoneVersionRecordsResponse @@ -2592,12 +3346,7 @@ func (s *API) ListDNSZoneVersionRecords(req *ListDNSZoneVersionRecordsRequest, o return &resp, nil } -type GetDNSZoneVersionDiffRequest struct { - DNSZoneVersionID string `json:"-"` -} - -// GetDNSZoneVersionDiff: access differences from a specific DNS zone version. -// Access a previous DNS zone version to see the differences from another specific version. +// GetDNSZoneVersionDiff: Access a previous DNS zone version to see the differences from another specific version. func (s *API) GetDNSZoneVersionDiff(req *GetDNSZoneVersionDiffRequest, opts ...scw.RequestOption) (*GetDNSZoneVersionDiffResponse, error) { var err error @@ -2606,9 +3355,8 @@ func (s *API) GetDNSZoneVersionDiff(req *GetDNSZoneVersionDiffRequest, opts ...s } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/dns-zones/version/" + fmt.Sprint(req.DNSZoneVersionID) + "/diff", - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/dns-zones/version/" + fmt.Sprint(req.DNSZoneVersionID) + "/diff", } var resp GetDNSZoneVersionDiffResponse @@ -2620,12 +3368,7 @@ func (s *API) GetDNSZoneVersionDiff(req *GetDNSZoneVersionDiffRequest, opts ...s return &resp, nil } -type RestoreDNSZoneVersionRequest struct { - DNSZoneVersionID string `json:"-"` -} - -// RestoreDNSZoneVersion: restore a DNS zone version. -// Restore and activate a version of a specific DNS zone. +// RestoreDNSZoneVersion: Restore and activate a version of a specific DNS zone. 
func (s *API) RestoreDNSZoneVersion(req *RestoreDNSZoneVersionRequest, opts ...scw.RequestOption) (*RestoreDNSZoneVersionResponse, error) { var err error @@ -2634,9 +3377,8 @@ func (s *API) RestoreDNSZoneVersion(req *RestoreDNSZoneVersionRequest, opts ...s } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/dns-zones/version/" + fmt.Sprint(req.DNSZoneVersionID) + "/restore", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/dns-zones/version/" + fmt.Sprint(req.DNSZoneVersionID) + "/restore", } err = scwReq.SetBody(req) @@ -2653,12 +3395,7 @@ func (s *API) RestoreDNSZoneVersion(req *RestoreDNSZoneVersionRequest, opts ...s return &resp, nil } -type GetSSLCertificateRequest struct { - DNSZone string `json:"-"` -} - -// GetSSLCertificate: get a DNS zone's TLS certificate. -// Get the DNS zone's TLS certificate. If you do not have a certificate, the ouptut returns `no certificate found`. +// GetSSLCertificate: Get the DNS zone's TLS certificate. If you do not have a certificate, the ouptut returns `no certificate found`. func (s *API) GetSSLCertificate(req *GetSSLCertificateRequest, opts ...scw.RequestOption) (*SSLCertificate, error) { var err error @@ -2667,9 +3404,8 @@ func (s *API) GetSSLCertificate(req *GetSSLCertificateRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/ssl-certificates/" + fmt.Sprint(req.DNSZone) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/ssl-certificates/" + fmt.Sprint(req.DNSZone) + "", } var resp SSLCertificate @@ -2681,21 +3417,13 @@ func (s *API) GetSSLCertificate(req *GetSSLCertificateRequest, opts ...scw.Reque return &resp, nil } -type CreateSSLCertificateRequest struct { - DNSZone string `json:"dns_zone"` - - AlternativeDNSZones []string `json:"alternative_dns_zones"` -} - -// CreateSSLCertificate: create or get the DNS zone's TLS certificate. -// Create a new TLS certificate or retrieve information about an existing TLS certificate. +// CreateSSLCertificate: Create a new TLS certificate or retrieve information about an existing TLS certificate. func (s *API) CreateSSLCertificate(req *CreateSSLCertificateRequest, opts ...scw.RequestOption) (*SSLCertificate, error) { var err error scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/ssl-certificates", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/ssl-certificates", } err = scwReq.SetBody(req) @@ -2712,18 +3440,7 @@ func (s *API) CreateSSLCertificate(req *CreateSSLCertificateRequest, opts ...scw return &resp, nil } -type ListSSLCertificatesRequest struct { - DNSZone string `json:"-"` - - Page *int32 `json:"-"` - - PageSize *uint32 `json:"-"` - - ProjectID *string `json:"-"` -} - -// ListSSLCertificates: list a user's TLS certificates. -// List all the TLS certificates a user has created, specified by the user's Project ID and the DNS zone. +// ListSSLCertificates: List all the TLS certificates a user has created, specified by the user's Project ID and the DNS zone. 
func (s *API) ListSSLCertificates(req *ListSSLCertificatesRequest, opts ...scw.RequestOption) (*ListSSLCertificatesResponse, error) { var err error @@ -2739,10 +3456,9 @@ func (s *API) ListSSLCertificates(req *ListSSLCertificatesRequest, opts ...scw.R parameter.AddToQuery(query, "project_id", req.ProjectID) scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/ssl-certificates", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/ssl-certificates", + Query: query, } var resp ListSSLCertificatesResponse @@ -2754,12 +3470,7 @@ func (s *API) ListSSLCertificates(req *ListSSLCertificatesRequest, opts ...scw.R return &resp, nil } -type DeleteSSLCertificateRequest struct { - DNSZone string `json:"-"` -} - -// DeleteSSLCertificate: delete a TLS certificate. -// Delete an existing TLS certificate specified by its DNS zone. Deleting a TLS certificate is permanent and cannot be undone. +// DeleteSSLCertificate: Delete an existing TLS certificate specified by its DNS zone. Deleting a TLS certificate is permanent and cannot be undone. func (s *API) DeleteSSLCertificate(req *DeleteSSLCertificateRequest, opts ...scw.RequestOption) (*DeleteSSLCertificateResponse, error) { var err error @@ -2768,9 +3479,8 @@ func (s *API) DeleteSSLCertificate(req *DeleteSSLCertificateRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/domain/v2beta1/ssl-certificates/" + fmt.Sprint(req.DNSZone) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/domain/v2beta1/ssl-certificates/" + fmt.Sprint(req.DNSZone) + "", } var resp DeleteSSLCertificateResponse @@ -2782,12 +3492,7 @@ func (s *API) DeleteSSLCertificate(req *DeleteSSLCertificateRequest, opts ...scw return &resp, nil } -type GetDNSZoneTsigKeyRequest struct { - DNSZone string `json:"-"` -} - -// GetDNSZoneTsigKey: get the DNS zone's TSIG key. -// Retrieve information about the TSIG key of a given DNS zone to allow AXFR requests. +// GetDNSZoneTsigKey: Retrieve information about the TSIG key of a given DNS zone to allow AXFR requests. func (s *API) GetDNSZoneTsigKey(req *GetDNSZoneTsigKeyRequest, opts ...scw.RequestOption) (*GetDNSZoneTsigKeyResponse, error) { var err error @@ -2796,9 +3501,8 @@ func (s *API) GetDNSZoneTsigKey(req *GetDNSZoneTsigKeyRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/tsig-key", - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/tsig-key", } var resp GetDNSZoneTsigKeyResponse @@ -2810,12 +3514,7 @@ func (s *API) GetDNSZoneTsigKey(req *GetDNSZoneTsigKeyRequest, opts ...scw.Reque return &resp, nil } -type DeleteDNSZoneTsigKeyRequest struct { - DNSZone string `json:"-"` -} - -// DeleteDNSZoneTsigKey: delete the DNS zone's TSIG key. -// Delete an existing TSIG key specified by its DNS zone. Deleting a TSIG key is permanent and cannot be undone. +// DeleteDNSZoneTsigKey: Delete an existing TSIG key specified by its DNS zone. Deleting a TSIG key is permanent and cannot be undone. 
func (s *API) DeleteDNSZoneTsigKey(req *DeleteDNSZoneTsigKeyRequest, opts ...scw.RequestOption) error { var err error @@ -2824,9 +3523,8 @@ func (s *API) DeleteDNSZoneTsigKey(req *DeleteDNSZoneTsigKeyRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/tsig-key", - Headers: http.Header{}, + Method: "DELETE", + Path: "/domain/v2beta1/dns-zones/" + fmt.Sprint(req.DNSZone) + "/tsig-key", } err = s.client.Do(scwReq, nil, opts...) @@ -2836,28 +3534,19 @@ func (s *API) DeleteDNSZoneTsigKey(req *DeleteDNSZoneTsigKeyRequest, opts ...scw return nil } -// Service RegistrarAPI - -type RegistrarAPIListTasksRequest struct { - Page *int32 `json:"-"` - - PageSize *uint32 `json:"-"` - - ProjectID *string `json:"-"` - - OrganizationID *string `json:"-"` - - Domain *string `json:"-"` - - Types []TaskType `json:"-"` - - Statuses []TaskStatus `json:"-"` - // OrderBy: default value: domain_desc - OrderBy ListTasksRequestOrderBy `json:"-"` +// Manage your domains and contacts. +type RegistrarAPI struct { + client *scw.Client } -// ListTasks: list tasks. -// List all operations performed on the account. +// NewRegistrarAPI returns a RegistrarAPI object from a Scaleway client. +func NewRegistrarAPI(client *scw.Client) *RegistrarAPI { + return &RegistrarAPI{ + client: client, + } +} + +// ListTasks: List all operations performed on the account. // You can filter the list of tasks by domain name. func (s *RegistrarAPI) ListTasks(req *RegistrarAPIListTasksRequest, opts ...scw.RequestOption) (*ListTasksResponse, error) { var err error @@ -2878,10 +3567,9 @@ func (s *RegistrarAPI) ListTasks(req *RegistrarAPIListTasksRequest, opts ...scw. parameter.AddToQuery(query, "order_by", req.OrderBy) scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/tasks", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/tasks", + Query: query, } var resp ListTasksResponse @@ -2893,34 +3581,7 @@ func (s *RegistrarAPI) ListTasks(req *RegistrarAPIListTasksRequest, opts ...scw. return &resp, nil } -type RegistrarAPIBuyDomainsRequest struct { - Domains []string `json:"domains"` - - DurationInYears uint32 `json:"duration_in_years"` - - ProjectID string `json:"project_id"` - - // Precisely one of OwnerContact, OwnerContactID must be set. - OwnerContactID *string `json:"owner_contact_id,omitempty"` - - // Precisely one of OwnerContact, OwnerContactID must be set. - OwnerContact *NewContact `json:"owner_contact,omitempty"` - - // Precisely one of AdministrativeContact, AdministrativeContactID must be set. - AdministrativeContactID *string `json:"administrative_contact_id,omitempty"` - - // Precisely one of AdministrativeContact, AdministrativeContactID must be set. - AdministrativeContact *NewContact `json:"administrative_contact,omitempty"` - - // Precisely one of TechnicalContact, TechnicalContactID must be set. - TechnicalContactID *string `json:"technical_contact_id,omitempty"` - - // Precisely one of TechnicalContact, TechnicalContactID must be set. - TechnicalContact *NewContact `json:"technical_contact,omitempty"` -} - -// BuyDomains: purchase domains. -// Request the registration of domain names. +// BuyDomains: Request the registration of domain names. // You can provide a domain's already existing contact or a new contact. 
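For reference, a hedged sketch of the registrar side introduced above: NewRegistrarAPI plus a BuyDomains call that respects the "Precisely one of" contact constraints declared on RegistrarAPIBuyDomainsRequest earlier in this hunk. It is not part of the vendored file; the client is assumed to be built as in the previous sketch, scw.StringPtr is an scw helper, and every ID is a placeholder.

package main

import (
	"fmt"

	domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// buyExampleDomain sketches a domain purchase; client setup is assumed.
func buyExampleDomain(client *scw.Client) error {
	registrar := domain.NewRegistrarAPI(client)

	order, err := registrar.BuyDomains(&domain.RegistrarAPIBuyDomainsRequest{
		Domains:         []string{"example.com"},
		DurationInYears: 1,
		ProjectID:       "11111111-1111-1111-1111-111111111111",
		// Exactly one of the *ContactID / *Contact fields may be set per role,
		// per the "Precisely one of ..." comments on the request struct; here
		// existing contact IDs are reused and the *Contact variants stay nil.
		OwnerContactID:          scw.StringPtr("22222222-2222-2222-2222-222222222222"),
		AdministrativeContactID: scw.StringPtr("22222222-2222-2222-2222-222222222222"),
		TechnicalContactID:      scw.StringPtr("22222222-2222-2222-2222-222222222222"),
	})
	if err != nil {
		return err
	}

	fmt.Println("order task:", order.TaskID)
	return nil
}

The returned OrderResponse carries a TaskID that can then be looked up with RegistrarAPI.ListTasks, which this hunk documents as filterable by domain name.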
func (s *RegistrarAPI) BuyDomains(req *RegistrarAPIBuyDomainsRequest, opts ...scw.RequestOption) (*OrderResponse, error) { var err error @@ -2931,9 +3592,8 @@ func (s *RegistrarAPI) BuyDomains(req *RegistrarAPIBuyDomainsRequest, opts ...sc } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/buy-domains", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/buy-domains", } err = scwReq.SetBody(req) @@ -2950,23 +3610,13 @@ func (s *RegistrarAPI) BuyDomains(req *RegistrarAPIBuyDomainsRequest, opts ...sc return &resp, nil } -type RegistrarAPIRenewDomainsRequest struct { - Domains []string `json:"domains"` - - DurationInYears uint32 `json:"duration_in_years"` - - ForceLateRenewal *bool `json:"force_late_renewal"` -} - -// RenewDomains: renew domains. -// Request the renewal of one or more domain names. +// RenewDomains: Request the renewal of one or more domain names. func (s *RegistrarAPI) RenewDomains(req *RegistrarAPIRenewDomainsRequest, opts ...scw.RequestOption) (*OrderResponse, error) { var err error scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/renew-domains", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/renew-domains", } err = scwReq.SetBody(req) @@ -2983,32 +3633,7 @@ func (s *RegistrarAPI) RenewDomains(req *RegistrarAPIRenewDomainsRequest, opts . return &resp, nil } -type RegistrarAPITransferInDomainRequest struct { - Domains []*TransferInDomainRequestTransferRequest `json:"domains"` - - ProjectID string `json:"project_id"` - - // Precisely one of OwnerContact, OwnerContactID must be set. - OwnerContactID *string `json:"owner_contact_id,omitempty"` - - // Precisely one of OwnerContact, OwnerContactID must be set. - OwnerContact *NewContact `json:"owner_contact,omitempty"` - - // Precisely one of AdministrativeContact, AdministrativeContactID must be set. - AdministrativeContactID *string `json:"administrative_contact_id,omitempty"` - - // Precisely one of AdministrativeContact, AdministrativeContactID must be set. - AdministrativeContact *NewContact `json:"administrative_contact,omitempty"` - - // Precisely one of TechnicalContact, TechnicalContactID must be set. - TechnicalContactID *string `json:"technical_contact_id,omitempty"` - - // Precisely one of TechnicalContact, TechnicalContactID must be set. - TechnicalContact *NewContact `json:"technical_contact,omitempty"` -} - -// TransferInDomain: transfer a domain. -// Request the transfer of a domain from another registrar to Scaleway Domains and DNS. +// TransferInDomain: Request the transfer of a domain from another registrar to Scaleway Domains and DNS. func (s *RegistrarAPI) TransferInDomain(req *RegistrarAPITransferInDomainRequest, opts ...scw.RequestOption) (*OrderResponse, error) { var err error @@ -3018,9 +3643,8 @@ func (s *RegistrarAPI) TransferInDomain(req *RegistrarAPITransferInDomainRequest } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/transfer-domains", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/transfer-domains", } err = scwReq.SetBody(req) @@ -3037,20 +3661,7 @@ func (s *RegistrarAPI) TransferInDomain(req *RegistrarAPITransferInDomainRequest return &resp, nil } -type RegistrarAPITradeDomainRequest struct { - Domain string `json:"-"` - - ProjectID *string `json:"project_id"` - - // Precisely one of NewOwnerContact, NewOwnerContactID must be set. 
- NewOwnerContactID *string `json:"new_owner_contact_id,omitempty"` - - // Precisely one of NewOwnerContact, NewOwnerContactID must be set. - NewOwnerContact *NewContact `json:"new_owner_contact,omitempty"` -} - -// TradeDomain: trade a domain's contact. -// Request to change a domain's contact owner.
+// TradeDomain: Request to change a domain's contact owner.
// If you specify the `organization_id` of the domain's new owner, the contact will change from the current owner's Scaleway account to the new owner's Scaleway account.
// If the new owner's current contact information is not available, the first ever contact they have created for previous domains is taken into account to operate the change.
// If the new owner has never created a contact to register domains before, an error message displays. @@ -3062,9 +3673,8 @@ func (s *RegistrarAPI) TradeDomain(req *RegistrarAPITradeDomainRequest, opts ... } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/trade", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/trade", } err = scwReq.SetBody(req) @@ -3081,14 +3691,7 @@ func (s *RegistrarAPI) TradeDomain(req *RegistrarAPITradeDomainRequest, opts ... return &resp, nil } -type RegistrarAPIRegisterExternalDomainRequest struct { - Domain string `json:"domain"` - - ProjectID string `json:"project_id"` -} - -// RegisterExternalDomain: register an external domain. -// Request the registration of an external domain name. +// RegisterExternalDomain: Request the registration of an external domain name. func (s *RegistrarAPI) RegisterExternalDomain(req *RegistrarAPIRegisterExternalDomainRequest, opts ...scw.RequestOption) (*RegisterExternalDomainResponse, error) { var err error @@ -3098,9 +3701,8 @@ func (s *RegistrarAPI) RegisterExternalDomain(req *RegistrarAPIRegisterExternalD } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/external-domains", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/external-domains", } err = scwReq.SetBody(req) @@ -3117,12 +3719,7 @@ func (s *RegistrarAPI) RegisterExternalDomain(req *RegistrarAPIRegisterExternalD return &resp, nil } -type RegistrarAPIDeleteExternalDomainRequest struct { - Domain string `json:"-"` -} - -// DeleteExternalDomain: delete an external domain. -// Delete an external domain name. +// DeleteExternalDomain: Delete an external domain name. func (s *RegistrarAPI) DeleteExternalDomain(req *RegistrarAPIDeleteExternalDomainRequest, opts ...scw.RequestOption) (*DeleteExternalDomainResponse, error) { var err error @@ -3131,9 +3728,8 @@ func (s *RegistrarAPI) DeleteExternalDomain(req *RegistrarAPIDeleteExternalDomai } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/domain/v2beta1/external-domains/" + fmt.Sprint(req.Domain) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/domain/v2beta1/external-domains/" + fmt.Sprint(req.Domain) + "", } var resp DeleteExternalDomainResponse @@ -3145,40 +3741,14 @@ func (s *RegistrarAPI) DeleteExternalDomain(req *RegistrarAPIDeleteExternalDomai return &resp, nil } -type RegistrarAPICheckContactsCompatibilityRequest struct { - Domains []string `json:"domains"` - - Tlds []string `json:"tlds"` - - // Precisely one of OwnerContact, OwnerContactID must be set. - OwnerContactID *string `json:"owner_contact_id,omitempty"` - - // Precisely one of OwnerContact, OwnerContactID must be set. - OwnerContact *NewContact `json:"owner_contact,omitempty"` - - // Precisely one of AdministrativeContact, AdministrativeContactID must be set. - AdministrativeContactID *string `json:"administrative_contact_id,omitempty"` - - // Precisely one of AdministrativeContact, AdministrativeContactID must be set. - AdministrativeContact *NewContact `json:"administrative_contact,omitempty"` - - // Precisely one of TechnicalContact, TechnicalContactID must be set. - TechnicalContactID *string `json:"technical_contact_id,omitempty"` - - // Precisely one of TechnicalContact, TechnicalContactID must be set. - TechnicalContact *NewContact `json:"technical_contact,omitempty"` -} - -// CheckContactsCompatibility: check if contacts are compatible with a domain or a TLD. 
-// Check whether contacts are compatible with a domain or a TLD. +// CheckContactsCompatibility: Check whether contacts are compatible with a domain or a TLD. // If contacts are not compatible with either the domain or the TLD, the information that needs to be corrected is returned. func (s *RegistrarAPI) CheckContactsCompatibility(req *RegistrarAPICheckContactsCompatibilityRequest, opts ...scw.RequestOption) (*CheckContactsCompatibilityResponse, error) { var err error scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/check-contacts-compatibility", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/check-contacts-compatibility", } err = scwReq.SetBody(req) @@ -3195,24 +3765,7 @@ func (s *RegistrarAPI) CheckContactsCompatibility(req *RegistrarAPICheckContacts return &resp, nil } -type RegistrarAPIListContactsRequest struct { - Page *int32 `json:"-"` - - PageSize *uint32 `json:"-"` - - Domain *string `json:"-"` - - ProjectID *string `json:"-"` - - OrganizationID *string `json:"-"` - // Role: default value: unknown_role - Role ListContactsRequestRole `json:"-"` - // EmailStatus: default value: email_status_unknown - EmailStatus ContactEmailStatus `json:"-"` -} - -// ListContacts: list contacts. -// Retrieve the list of contacts and their associated domains and roles. +// ListContacts: Retrieve the list of contacts and their associated domains and roles. // You can filter the list by domain name. func (s *RegistrarAPI) ListContacts(req *RegistrarAPIListContactsRequest, opts ...scw.RequestOption) (*ListContactsResponse, error) { var err error @@ -3232,10 +3785,9 @@ func (s *RegistrarAPI) ListContacts(req *RegistrarAPIListContactsRequest, opts . parameter.AddToQuery(query, "email_status", req.EmailStatus) scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/contacts", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/contacts", + Query: query, } var resp ListContactsResponse @@ -3247,12 +3799,7 @@ func (s *RegistrarAPI) ListContacts(req *RegistrarAPIListContactsRequest, opts . return &resp, nil } -type RegistrarAPIGetContactRequest struct { - ContactID string `json:"-"` -} - -// GetContact: get a contact. -// Retrieve a contact's details from the registrar using the given contact's ID. +// GetContact: Retrieve a contact's details from the registrar using the given contact's ID. 
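The ListContacts request fields dropped from this spot (Page, PageSize, Domain, ProjectID, OrganizationID, Role, EmailStatus) are only relocated by the regeneration; the unchanged ListContacts signature still takes the same request type. A small sketch of the documented "filter by domain name" usage, with placeholder credentials and domain, under the same assumptions as the sketch above:

package main

import (
	"fmt"
	"log"

	domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY")) // placeholder credentials
	if err != nil {
		log.Fatal(err)
	}
	api := domain.NewRegistrarAPI(client)

	// Retrieve the contacts attached to a single domain (placeholder name).
	name := "example.com"
	pageSize := uint32(20)
	resp, err := api.ListContacts(&domain.RegistrarAPIListContactsRequest{
		Domain:   &name,
		PageSize: &pageSize,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d contact(s) associated with %s\n", resp.TotalCount, name)
}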
func (s *RegistrarAPI) GetContact(req *RegistrarAPIGetContactRequest, opts ...scw.RequestOption) (*Contact, error) { var err error @@ -3261,9 +3808,8 @@ func (s *RegistrarAPI) GetContact(req *RegistrarAPIGetContactRequest, opts ...sc } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/contacts/" + fmt.Sprint(req.ContactID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/contacts/" + fmt.Sprint(req.ContactID) + "", } var resp Contact @@ -3275,50 +3821,7 @@ func (s *RegistrarAPI) GetContact(req *RegistrarAPIGetContactRequest, opts ...sc return &resp, nil } -type RegistrarAPIUpdateContactRequest struct { - ContactID string `json:"-"` - - Email *string `json:"email"` - - EmailAlt *string `json:"email_alt"` - - PhoneNumber *string `json:"phone_number"` - - FaxNumber *string `json:"fax_number"` - - AddressLine1 *string `json:"address_line_1"` - - AddressLine2 *string `json:"address_line_2"` - - Zip *string `json:"zip"` - - City *string `json:"city"` - - Country *string `json:"country"` - - VatIdentificationCode *string `json:"vat_identification_code"` - - CompanyIdentificationCode *string `json:"company_identification_code"` - // Lang: default value: unknown_language_code - Lang LanguageCode `json:"lang"` - - Resale *bool `json:"resale"` - // Deprecated - Questions *[]*UpdateContactRequestQuestion `json:"questions,omitempty"` - - ExtensionFr *ContactExtensionFR `json:"extension_fr"` - - ExtensionEu *ContactExtensionEU `json:"extension_eu"` - - WhoisOptIn *bool `json:"whois_opt_in"` - - State *string `json:"state"` - - ExtensionNl *ContactExtensionNL `json:"extension_nl"` -} - -// UpdateContact: update contact. -// Edit the contact's information. +// UpdateContact: Edit the contact's information. func (s *RegistrarAPI) UpdateContact(req *RegistrarAPIUpdateContactRequest, opts ...scw.RequestOption) (*Contact, error) { var err error @@ -3327,9 +3830,8 @@ func (s *RegistrarAPI) UpdateContact(req *RegistrarAPIUpdateContactRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/domain/v2beta1/contacts/" + fmt.Sprint(req.ContactID) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/domain/v2beta1/contacts/" + fmt.Sprint(req.ContactID) + "", } err = scwReq.SetBody(req) @@ -3346,28 +3848,7 @@ func (s *RegistrarAPI) UpdateContact(req *RegistrarAPIUpdateContactRequest, opts return &resp, nil } -type RegistrarAPIListDomainsRequest struct { - Page *int32 `json:"-"` - - PageSize *uint32 `json:"-"` - // OrderBy: default value: domain_asc - OrderBy ListDomainsRequestOrderBy `json:"-"` - - Registrar *string `json:"-"` - // Status: default value: status_unknown - Status DomainStatus `json:"-"` - - ProjectID *string `json:"-"` - - OrganizationID *string `json:"-"` - - IsExternal *bool `json:"-"` - - Domain *string `json:"-"` -} - -// ListDomains: list domains. -// Retrieve the list of domains you own. +// ListDomains: Retrieve the list of domains you own. func (s *RegistrarAPI) ListDomains(req *RegistrarAPIListDomainsRequest, opts ...scw.RequestOption) (*ListDomainsResponse, error) { var err error @@ -3388,10 +3869,9 @@ func (s *RegistrarAPI) ListDomains(req *RegistrarAPIListDomainsRequest, opts ... 
parameter.AddToQuery(query, "domain", req.Domain) scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/domains", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/domains", + Query: query, } var resp ListDomainsResponse @@ -3403,20 +3883,7 @@ func (s *RegistrarAPI) ListDomains(req *RegistrarAPIListDomainsRequest, opts ... return &resp, nil } -type RegistrarAPIListRenewableDomainsRequest struct { - Page *int32 `json:"-"` - - PageSize *uint32 `json:"-"` - // OrderBy: default value: domain_asc - OrderBy ListRenewableDomainsRequestOrderBy `json:"-"` - - ProjectID *string `json:"-"` - - OrganizationID *string `json:"-"` -} - -// ListRenewableDomains: list domains that can be renewed. -// Retrieve the list of domains you own that can be renewed. You can also see the maximum renewal duration in years for your domains that are renewable. +// ListRenewableDomains: Retrieve the list of domains you own that can be renewed. You can also see the maximum renewal duration in years for your domains that are renewable. func (s *RegistrarAPI) ListRenewableDomains(req *RegistrarAPIListRenewableDomainsRequest, opts ...scw.RequestOption) (*ListRenewableDomainsResponse, error) { var err error @@ -3433,10 +3900,9 @@ func (s *RegistrarAPI) ListRenewableDomains(req *RegistrarAPIListRenewableDomain parameter.AddToQuery(query, "organization_id", req.OrganizationID) scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/renewable-domains", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/renewable-domains", + Query: query, } var resp ListRenewableDomainsResponse @@ -3448,12 +3914,7 @@ func (s *RegistrarAPI) ListRenewableDomains(req *RegistrarAPIListRenewableDomain return &resp, nil } -type RegistrarAPIGetDomainRequest struct { - Domain string `json:"-"` -} - -// GetDomain: get domain. -// Retrieve a specific domain and display the domain's information. +// GetDomain: Retrieve a specific domain and display the domain's information. func (s *RegistrarAPI) GetDomain(req *RegistrarAPIGetDomainRequest, opts ...scw.RequestOption) (*Domain, error) { var err error @@ -3462,9 +3923,8 @@ func (s *RegistrarAPI) GetDomain(req *RegistrarAPIGetDomainRequest, opts ...scw. } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "", } var resp Domain @@ -3476,30 +3936,7 @@ func (s *RegistrarAPI) GetDomain(req *RegistrarAPIGetDomainRequest, opts ...scw. return &resp, nil } -type RegistrarAPIUpdateDomainRequest struct { - Domain string `json:"-"` - - // Precisely one of TechnicalContact, TechnicalContactID must be set. - TechnicalContactID *string `json:"technical_contact_id,omitempty"` - - // Precisely one of TechnicalContact, TechnicalContactID must be set. - TechnicalContact *NewContact `json:"technical_contact,omitempty"` - - // Precisely one of OwnerContact, OwnerContactID must be set. - OwnerContactID *string `json:"owner_contact_id,omitempty"` - - // Precisely one of OwnerContact, OwnerContactID must be set. - OwnerContact *NewContact `json:"owner_contact,omitempty"` - - // Precisely one of AdministrativeContact, AdministrativeContactID must be set. - AdministrativeContactID *string `json:"administrative_contact_id,omitempty"` - - // Precisely one of AdministrativeContact, AdministrativeContactID must be set. 
- AdministrativeContact *NewContact `json:"administrative_contact,omitempty"` -} - -// UpdateDomain: update a domain's contacts. -// Update contacts for a specific domain or create a new contact.
+// UpdateDomain: Update contacts for a specific domain or create a new contact.
// If you add the same contact for multiple roles (owner, administrative, technical), only one ID will be created and used for all of the roles. func (s *RegistrarAPI) UpdateDomain(req *RegistrarAPIUpdateDomainRequest, opts ...scw.RequestOption) (*Domain, error) { var err error @@ -3509,9 +3946,8 @@ func (s *RegistrarAPI) UpdateDomain(req *RegistrarAPIUpdateDomainRequest, opts . } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "", } err = scwReq.SetBody(req) @@ -3528,12 +3964,7 @@ func (s *RegistrarAPI) UpdateDomain(req *RegistrarAPIUpdateDomainRequest, opts . return &resp, nil } -type RegistrarAPILockDomainTransferRequest struct { - Domain string `json:"-"` -} - -// LockDomainTransfer: lock the transfer of a domain. -// Lock the transfer of a domain. This means that the domain cannot be transferred and the authorization code cannot be requested to your current registrar. +// LockDomainTransfer: Lock the transfer of a domain. This means that the domain cannot be transferred and the authorization code cannot be requested to your current registrar. func (s *RegistrarAPI) LockDomainTransfer(req *RegistrarAPILockDomainTransferRequest, opts ...scw.RequestOption) (*Domain, error) { var err error @@ -3542,9 +3973,8 @@ func (s *RegistrarAPI) LockDomainTransfer(req *RegistrarAPILockDomainTransferReq } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/lock-transfer", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/lock-transfer", } err = scwReq.SetBody(req) @@ -3561,12 +3991,7 @@ func (s *RegistrarAPI) LockDomainTransfer(req *RegistrarAPILockDomainTransferReq return &resp, nil } -type RegistrarAPIUnlockDomainTransferRequest struct { - Domain string `json:"-"` -} - -// UnlockDomainTransfer: unlock the transfer of a domain. -// Unlock the transfer of a domain. This means that the domain can be transferred and the authorization code can be requested to your current registrar. +// UnlockDomainTransfer: Unlock the transfer of a domain. This means that the domain can be transferred and the authorization code can be requested to your current registrar. func (s *RegistrarAPI) UnlockDomainTransfer(req *RegistrarAPIUnlockDomainTransferRequest, opts ...scw.RequestOption) (*Domain, error) { var err error @@ -3575,9 +4000,8 @@ func (s *RegistrarAPI) UnlockDomainTransfer(req *RegistrarAPIUnlockDomainTransfe } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/unlock-transfer", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/unlock-transfer", } err = scwReq.SetBody(req) @@ -3594,12 +4018,7 @@ func (s *RegistrarAPI) UnlockDomainTransfer(req *RegistrarAPIUnlockDomainTransfe return &resp, nil } -type RegistrarAPIEnableDomainAutoRenewRequest struct { - Domain string `json:"-"` -} - -// EnableDomainAutoRenew: enable auto renew. -// Enable the `auto renew` feature for a domain. This means the domain will be automatically renewed before its expiry date. +// EnableDomainAutoRenew: Enable the `auto renew` feature for a domain. This means the domain will be automatically renewed before its expiry date. 
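UpdateDomain keeps its contract through this regeneration: the request carries the domain plus, for each role, precisely one of a contact ID or a full NewContact. A hedged sketch of swapping the technical contact by ID, with placeholder values throughout and the same client setup as the sketches above:

package main

import (
	"log"

	domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY")) // placeholder credentials
	if err != nil {
		log.Fatal(err)
	}
	api := domain.NewRegistrarAPI(client)

	// Point the technical contact of a domain at an existing contact ID.
	// Per the request struct, set either TechnicalContactID or TechnicalContact, never both.
	contactID := "11111111-1111-1111-1111-111111111111" // placeholder contact ID
	if _, err := api.UpdateDomain(&domain.RegistrarAPIUpdateDomainRequest{
		Domain:             "example.com", // placeholder domain
		TechnicalContactID: &contactID,
	}); err != nil {
		log.Fatal(err)
	}
	log.Println("technical contact update accepted")
}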
func (s *RegistrarAPI) EnableDomainAutoRenew(req *RegistrarAPIEnableDomainAutoRenewRequest, opts ...scw.RequestOption) (*Domain, error) { var err error @@ -3608,9 +4027,8 @@ func (s *RegistrarAPI) EnableDomainAutoRenew(req *RegistrarAPIEnableDomainAutoRe } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/enable-auto-renew", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/enable-auto-renew", } err = scwReq.SetBody(req) @@ -3627,12 +4045,7 @@ func (s *RegistrarAPI) EnableDomainAutoRenew(req *RegistrarAPIEnableDomainAutoRe return &resp, nil } -type RegistrarAPIDisableDomainAutoRenewRequest struct { - Domain string `json:"-"` -} - -// DisableDomainAutoRenew: disable auto renew. -// Disable the `auto renew` feature for a domain. This means the domain will not be renewed before its expiry date. +// DisableDomainAutoRenew: Disable the `auto renew` feature for a domain. This means the domain will not be renewed before its expiry date. func (s *RegistrarAPI) DisableDomainAutoRenew(req *RegistrarAPIDisableDomainAutoRenewRequest, opts ...scw.RequestOption) (*Domain, error) { var err error @@ -3641,9 +4054,8 @@ func (s *RegistrarAPI) DisableDomainAutoRenew(req *RegistrarAPIDisableDomainAuto } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/disable-auto-renew", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/disable-auto-renew", } err = scwReq.SetBody(req) @@ -3660,12 +4072,7 @@ func (s *RegistrarAPI) DisableDomainAutoRenew(req *RegistrarAPIDisableDomainAuto return &resp, nil } -type RegistrarAPIGetDomainAuthCodeRequest struct { - Domain string `json:"-"` -} - -// GetDomainAuthCode: get a domain's authorization code. -// Retrieve the authorization code to tranfer an unlocked domain. The output returns an error if the domain is locked. +// GetDomainAuthCode: Retrieve the authorization code to tranfer an unlocked domain. The output returns an error if the domain is locked. // Some TLDs may have a different procedure to retrieve the authorization code. In that case, the information displays in the message field. func (s *RegistrarAPI) GetDomainAuthCode(req *RegistrarAPIGetDomainAuthCodeRequest, opts ...scw.RequestOption) (*GetDomainAuthCodeResponse, error) { var err error @@ -3675,9 +4082,8 @@ func (s *RegistrarAPI) GetDomainAuthCode(req *RegistrarAPIGetDomainAuthCodeReque } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/auth-code", - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/auth-code", } var resp GetDomainAuthCodeResponse @@ -3689,14 +4095,7 @@ func (s *RegistrarAPI) GetDomainAuthCode(req *RegistrarAPIGetDomainAuthCodeReque return &resp, nil } -type RegistrarAPIEnableDomainDNSSECRequest struct { - Domain string `json:"-"` - - DsRecord *DSRecord `json:"ds_record"` -} - -// EnableDomainDNSSEC: update domain DNSSEC. -// If your domain has the default Scaleway NS and uses another registrar, you have to update the DS record manually. +// EnableDomainDNSSEC: If your domain has the default Scaleway NS and uses another registrar, you have to update the DS record manually. 
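GetDomainAuthCode, per its doc comment above, only succeeds for unlocked domains, so a transfer-out flow typically pairs it with UnlockDomainTransfer. A hedged sketch under the same client-setup assumptions, with a placeholder domain; the response fields are defined earlier in this file and are not inspected here:

package main

import (
	"log"

	domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY")) // placeholder credentials
	if err != nil {
		log.Fatal(err)
	}
	api := domain.NewRegistrarAPI(client)

	name := "example.com" // placeholder domain

	// Unlock the transfer so the registry will hand out the authorization code.
	if _, err := api.UnlockDomainTransfer(&domain.RegistrarAPIUnlockDomainTransferRequest{Domain: name}); err != nil {
		log.Fatal(err)
	}

	// Request the authorization code; the call errors out if the domain is still locked.
	if _, err := api.GetDomainAuthCode(&domain.RegistrarAPIGetDomainAuthCodeRequest{Domain: name}); err != nil {
		log.Fatal(err)
	}
	log.Printf("authorization code issued for %s", name)
}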
func (s *RegistrarAPI) EnableDomainDNSSEC(req *RegistrarAPIEnableDomainDNSSECRequest, opts ...scw.RequestOption) (*Domain, error) { var err error @@ -3705,9 +4104,8 @@ func (s *RegistrarAPI) EnableDomainDNSSEC(req *RegistrarAPIEnableDomainDNSSECReq } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/enable-dnssec", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/enable-dnssec", } err = scwReq.SetBody(req) @@ -3724,12 +4122,7 @@ func (s *RegistrarAPI) EnableDomainDNSSEC(req *RegistrarAPIEnableDomainDNSSECReq return &resp, nil } -type RegistrarAPIDisableDomainDNSSECRequest struct { - Domain string `json:"-"` -} - -// DisableDomainDNSSEC: disable a domain's DNSSEC. -// Disable DNSSEC for a domain. +// DisableDomainDNSSEC: Disable DNSSEC for a domain. func (s *RegistrarAPI) DisableDomainDNSSEC(req *RegistrarAPIDisableDomainDNSSECRequest, opts ...scw.RequestOption) (*Domain, error) { var err error @@ -3738,9 +4131,8 @@ func (s *RegistrarAPI) DisableDomainDNSSEC(req *RegistrarAPIDisableDomainDNSSECR } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/disable-dnssec", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/disable-dnssec", } err = scwReq.SetBody(req) @@ -3757,17 +4149,7 @@ func (s *RegistrarAPI) DisableDomainDNSSEC(req *RegistrarAPIDisableDomainDNSSECR return &resp, nil } -type RegistrarAPISearchAvailableDomainsRequest struct { - // Domains: a list of domain to search, TLD is optional. - Domains []string `json:"-"` - // Tlds: array of tlds to search on. - Tlds []string `json:"-"` - // StrictSearch: search exact match. - StrictSearch bool `json:"-"` -} - -// SearchAvailableDomains: search available domains. -// Search a domain or a maximum of 10 domains that are available. +// SearchAvailableDomains: Search a domain or a maximum of 10 domains that are available. // // If the TLD list is empty or not set, the search returns the results from the most popular TLDs. func (s *RegistrarAPI) SearchAvailableDomains(req *RegistrarAPISearchAvailableDomainsRequest, opts ...scw.RequestOption) (*SearchAvailableDomainsResponse, error) { @@ -3779,10 +4161,9 @@ func (s *RegistrarAPI) SearchAvailableDomains(req *RegistrarAPISearchAvailableDo parameter.AddToQuery(query, "strict_search", req.StrictSearch) scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/search-domains", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/search-domains", + Query: query, } var resp SearchAvailableDomainsResponse @@ -3794,16 +4175,37 @@ func (s *RegistrarAPI) SearchAvailableDomains(req *RegistrarAPISearchAvailableDo return &resp, nil } -type RegistrarAPICreateDomainHostRequest struct { - Domain string `json:"-"` +// ListTlds: Retrieve the list of TLDs and offers associated with them. 
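SearchAvailableDomains sends plain query parameters rather than a JSON body: up to 10 candidate names (TLD optional), an optional TLD list (empty means the most popular TLDs), and a strict-search flag. A hedged sketch with placeholder names, reusing the client setup assumed above; the response structure is defined earlier in this file and left uninspected here:

package main

import (
	"log"

	domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY")) // placeholder credentials
	if err != nil {
		log.Fatal(err)
	}
	api := domain.NewRegistrarAPI(client)

	// Check a couple of candidate names against two TLDs.
	if _, err := api.SearchAvailableDomains(&domain.RegistrarAPISearchAvailableDomainsRequest{
		Domains: []string{"example", "example-shop"}, // placeholder names; the TLD part is optional
		Tlds:    []string{"com", "net"},              // leave empty to search the most popular TLDs
	}); err != nil {
		log.Fatal(err)
	}
	log.Println("availability query succeeded")
}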
+func (s *RegistrarAPI) ListTlds(req *RegistrarAPIListTldsRequest, opts ...scw.RequestOption) (*ListTldsResponse, error) { + var err error - Name string `json:"name"` + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } - IPs []net.IP `json:"ips"` + query := url.Values{} + parameter.AddToQuery(query, "tlds", req.Tlds) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "order_by", req.OrderBy) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/domain/v2beta1/tlds", + Query: query, + } + + var resp ListTldsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil } -// CreateDomainHost: create a hostname for a domain. -// Create a hostname for a domain with glue IPs. +// CreateDomainHost: Create a hostname for a domain with glue IPs. func (s *RegistrarAPI) CreateDomainHost(req *RegistrarAPICreateDomainHostRequest, opts ...scw.RequestOption) (*Host, error) { var err error @@ -3812,9 +4214,8 @@ func (s *RegistrarAPI) CreateDomainHost(req *RegistrarAPICreateDomainHostRequest } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/hosts", - Headers: http.Header{}, + Method: "POST", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/hosts", } err = scwReq.SetBody(req) @@ -3831,16 +4232,7 @@ func (s *RegistrarAPI) CreateDomainHost(req *RegistrarAPICreateDomainHostRequest return &resp, nil } -type RegistrarAPIListDomainHostsRequest struct { - Domain string `json:"-"` - - Page *int32 `json:"-"` - - PageSize *uint32 `json:"-"` -} - -// ListDomainHosts: list a domain's hostnames. -// List a domain's hostnames using their glue IPs. +// ListDomainHosts: List a domain's hostnames using their glue IPs. func (s *RegistrarAPI) ListDomainHosts(req *RegistrarAPIListDomainHostsRequest, opts ...scw.RequestOption) (*ListDomainHostsResponse, error) { var err error @@ -3858,10 +4250,9 @@ func (s *RegistrarAPI) ListDomainHosts(req *RegistrarAPIListDomainHostsRequest, } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/hosts", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/hosts", + Query: query, } var resp ListDomainHostsResponse @@ -3873,16 +4264,7 @@ func (s *RegistrarAPI) ListDomainHosts(req *RegistrarAPIListDomainHostsRequest, return &resp, nil } -type RegistrarAPIUpdateDomainHostRequest struct { - Domain string `json:"-"` - - Name string `json:"-"` - - IPs *[]string `json:"ips"` -} - -// UpdateDomainHost: update a domain's hostname. -// Update a domain's hostname with glue IPs. +// UpdateDomainHost: Update a domain's hostname with glue IPs. 
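CreateDomainHost registers a glue record: a hostname under the domain plus the IPs it should resolve to. The request fields removed in the hunks above (Domain, Name, IPs []net.IP) are only relocated; the unchanged CreateDomainHost signature still takes the same request type. A hedged sketch with placeholder values and the same client setup as above:

package main

import (
	"log"
	"net"

	domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY")) // placeholder credentials
	if err != nil {
		log.Fatal(err)
	}
	api := domain.NewRegistrarAPI(client)

	// Create a host of example.com with one glue IP (all values are placeholders).
	if _, err := api.CreateDomainHost(&domain.RegistrarAPICreateDomainHostRequest{
		Domain: "example.com",
		Name:   "ns1.example.com", // placeholder hostname
		IPs:    []net.IP{net.ParseIP("203.0.113.10")},
	}); err != nil {
		log.Fatal(err)
	}
	log.Println("glue record created")
}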
func (s *RegistrarAPI) UpdateDomainHost(req *RegistrarAPIUpdateDomainHostRequest, opts ...scw.RequestOption) (*Host, error) { var err error @@ -3895,9 +4277,8 @@ func (s *RegistrarAPI) UpdateDomainHost(req *RegistrarAPIUpdateDomainHostRequest } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/hosts/" + fmt.Sprint(req.Name) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/hosts/" + fmt.Sprint(req.Name) + "", } err = scwReq.SetBody(req) @@ -3914,13 +4295,7 @@ func (s *RegistrarAPI) UpdateDomainHost(req *RegistrarAPIUpdateDomainHostRequest return &resp, nil } -type RegistrarAPIDeleteDomainHostRequest struct { - Domain string `json:"-"` - - Name string `json:"-"` -} - -// DeleteDomainHost: delete a domain's hostname. +// DeleteDomainHost: Delete a domain's hostname. func (s *RegistrarAPI) DeleteDomainHost(req *RegistrarAPIDeleteDomainHostRequest, opts ...scw.RequestOption) (*Host, error) { var err error @@ -3933,9 +4308,8 @@ func (s *RegistrarAPI) DeleteDomainHost(req *RegistrarAPIDeleteDomainHostRequest } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/hosts/" + fmt.Sprint(req.Name) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/domain/v2beta1/domains/" + fmt.Sprint(req.Domain) + "/hosts/" + fmt.Sprint(req.Name) + "", } var resp Host @@ -3946,193 +4320,3 @@ func (s *RegistrarAPI) DeleteDomainHost(req *RegistrarAPIDeleteDomainHostRequest } return &resp, nil } - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListDNSZonesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListDNSZonesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListDNSZonesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.DNSZones = append(r.DNSZones, results.DNSZones...) - r.TotalCount += uint32(len(results.DNSZones)) - return uint32(len(results.DNSZones)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListDNSZoneRecordsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListDNSZoneRecordsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListDNSZoneRecordsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Records = append(r.Records, results.Records...) - r.TotalCount += uint32(len(results.Records)) - return uint32(len(results.Records)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListDNSZoneVersionsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListDNSZoneVersionsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListDNSZoneVersionsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Versions = append(r.Versions, results.Versions...) 
- r.TotalCount += uint32(len(results.Versions)) - return uint32(len(results.Versions)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListDNSZoneVersionRecordsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListDNSZoneVersionRecordsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListDNSZoneVersionRecordsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Records = append(r.Records, results.Records...) - r.TotalCount += uint32(len(results.Records)) - return uint32(len(results.Records)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListSSLCertificatesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListSSLCertificatesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListSSLCertificatesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Certificates = append(r.Certificates, results.Certificates...) - r.TotalCount += uint32(len(results.Certificates)) - return uint32(len(results.Certificates)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListTasksResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListTasksResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListTasksResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Tasks = append(r.Tasks, results.Tasks...) - r.TotalCount += uint32(len(results.Tasks)) - return uint32(len(results.Tasks)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListContactsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListContactsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListContactsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Contacts = append(r.Contacts, results.Contacts...) - r.TotalCount += uint32(len(results.Contacts)) - return uint32(len(results.Contacts)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListDomainsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListDomainsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListDomainsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Domains = append(r.Domains, results.Domains...) 
- r.TotalCount += uint32(len(results.Domains)) - return uint32(len(results.Domains)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListRenewableDomainsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListRenewableDomainsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListRenewableDomainsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Domains = append(r.Domains, results.Domains...) - r.TotalCount += uint32(len(results.Domains)) - return uint32(len(results.Domains)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListDomainHostsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListDomainHostsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListDomainHostsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Hosts = append(r.Hosts, results.Hosts...) - r.TotalCount += uint32(len(results.Hosts)) - return uint32(len(results.Hosts)), nil -} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1/domain_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1/domain_utils.go index 7df5cb8176..0987dc39fe 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1/domain_utils.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1/domain_utils.go @@ -25,6 +25,7 @@ const ( // WaitForDNSZoneRequest is used by WaitForDNSZone method. type WaitForDNSZoneRequest struct { DNSZone string + DNSZones []string Timeout *time.Duration RetryInterval *time.Duration } @@ -51,10 +52,16 @@ func (s *API) WaitForDNSZone( dns, err := async.WaitSync(&async.WaitSyncConfig{ Get: func() (interface{}, bool, error) { + listReq := &ListDNSZonesRequest{ + DNSZones: req.DNSZones, + } + + if req.DNSZone != "" { + listReq.DNSZone = &req.DNSZone + } + // listing dns zones and take the first one - DNSZones, err := s.ListDNSZones(&ListDNSZonesRequest{ - DNSZones: []string{req.DNSZone}, - }, opts...) + DNSZones, err := s.ListDNSZones(listReq, opts...) if err != nil { return nil, false, err diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1/iam_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1/iam_sdk.go index cf656fbe88..cf1723a95f 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1/iam_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1/iam_sdk.go @@ -39,26 +39,14 @@ var ( _ = namegenerator.GetRandomName ) -// API: iAM API. -type API struct { - client *scw.Client -} - -// NewAPI returns a API object from a Scaleway client. -func NewAPI(client *scw.Client) *API { - return &API{ - client: client, - } -} - type BearerType string const ( - // Unknown bearer type + // Unknown bearer type. BearerTypeUnknownBearerType = BearerType("unknown_bearer_type") - // User + // User. BearerTypeUser = BearerType("user") - // Application + // Application. BearerTypeApplication = BearerType("application") ) @@ -88,21 +76,21 @@ func (enum *BearerType) UnmarshalJSON(data []byte) error { type ListAPIKeysRequestOrderBy string const ( - // Creation date ascending + // Creation date ascending. 
ListAPIKeysRequestOrderByCreatedAtAsc = ListAPIKeysRequestOrderBy("created_at_asc") - // Creation date descending + // Creation date descending. ListAPIKeysRequestOrderByCreatedAtDesc = ListAPIKeysRequestOrderBy("created_at_desc") - // Update date ascending + // Update date ascending. ListAPIKeysRequestOrderByUpdatedAtAsc = ListAPIKeysRequestOrderBy("updated_at_asc") - // Update date descending + // Update date descending. ListAPIKeysRequestOrderByUpdatedAtDesc = ListAPIKeysRequestOrderBy("updated_at_desc") - // Expiration date ascending + // Expiration date ascending. ListAPIKeysRequestOrderByExpiresAtAsc = ListAPIKeysRequestOrderBy("expires_at_asc") - // Expiration date descending + // Expiration date descending. ListAPIKeysRequestOrderByExpiresAtDesc = ListAPIKeysRequestOrderBy("expires_at_desc") - // Access key ascending + // Access key ascending. ListAPIKeysRequestOrderByAccessKeyAsc = ListAPIKeysRequestOrderBy("access_key_asc") - // Access key descending + // Access key descending. ListAPIKeysRequestOrderByAccessKeyDesc = ListAPIKeysRequestOrderBy("access_key_desc") ) @@ -132,17 +120,17 @@ func (enum *ListAPIKeysRequestOrderBy) UnmarshalJSON(data []byte) error { type ListApplicationsRequestOrderBy string const ( - // Creation date ascending + // Creation date ascending. ListApplicationsRequestOrderByCreatedAtAsc = ListApplicationsRequestOrderBy("created_at_asc") - // Creation date descending + // Creation date descending. ListApplicationsRequestOrderByCreatedAtDesc = ListApplicationsRequestOrderBy("created_at_desc") - // Update date ascending + // Update date ascending. ListApplicationsRequestOrderByUpdatedAtAsc = ListApplicationsRequestOrderBy("updated_at_asc") - // Update date descending + // Update date descending. ListApplicationsRequestOrderByUpdatedAtDesc = ListApplicationsRequestOrderBy("updated_at_desc") - // Name ascending + // Name ascending. ListApplicationsRequestOrderByNameAsc = ListApplicationsRequestOrderBy("name_asc") - // Name descending + // Name descending. ListApplicationsRequestOrderByNameDesc = ListApplicationsRequestOrderBy("name_desc") ) @@ -172,17 +160,17 @@ func (enum *ListApplicationsRequestOrderBy) UnmarshalJSON(data []byte) error { type ListGroupsRequestOrderBy string const ( - // Creation date ascending + // Creation date ascending. ListGroupsRequestOrderByCreatedAtAsc = ListGroupsRequestOrderBy("created_at_asc") - // Creation date descending + // Creation date descending. ListGroupsRequestOrderByCreatedAtDesc = ListGroupsRequestOrderBy("created_at_desc") - // Update date ascending + // Update date ascending. ListGroupsRequestOrderByUpdatedAtAsc = ListGroupsRequestOrderBy("updated_at_asc") - // Update date descending + // Update date descending. ListGroupsRequestOrderByUpdatedAtDesc = ListGroupsRequestOrderBy("updated_at_desc") - // Name ascending + // Name ascending. ListGroupsRequestOrderByNameAsc = ListGroupsRequestOrderBy("name_asc") - // Name descending + // Name descending. ListGroupsRequestOrderByNameDesc = ListGroupsRequestOrderBy("name_desc") ) @@ -212,13 +200,13 @@ func (enum *ListGroupsRequestOrderBy) UnmarshalJSON(data []byte) error { type ListJWTsRequestOrderBy string const ( - // Creation date ascending + // Creation date ascending. ListJWTsRequestOrderByCreatedAtAsc = ListJWTsRequestOrderBy("created_at_asc") - // Creation date descending + // Creation date descending. ListJWTsRequestOrderByCreatedAtDesc = ListJWTsRequestOrderBy("created_at_desc") - // Update date ascending + // Update date ascending. 
ListJWTsRequestOrderByUpdatedAtAsc = ListJWTsRequestOrderBy("updated_at_asc") - // Update date descending + // Update date descending. ListJWTsRequestOrderByUpdatedAtDesc = ListJWTsRequestOrderBy("updated_at_desc") ) @@ -245,16 +233,48 @@ func (enum *ListJWTsRequestOrderBy) UnmarshalJSON(data []byte) error { return nil } +type ListLogsRequestOrderBy string + +const ( + // Creation date ascending. + ListLogsRequestOrderByCreatedAtAsc = ListLogsRequestOrderBy("created_at_asc") + // Creation date descending. + ListLogsRequestOrderByCreatedAtDesc = ListLogsRequestOrderBy("created_at_desc") +) + +func (enum ListLogsRequestOrderBy) String() string { + if enum == "" { + // return default value if empty + return "created_at_asc" + } + return string(enum) +} + +func (enum ListLogsRequestOrderBy) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *ListLogsRequestOrderBy) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = ListLogsRequestOrderBy(ListLogsRequestOrderBy(tmp).String()) + return nil +} + type ListPermissionSetsRequestOrderBy string const ( - // Name ascending + // Name ascending. ListPermissionSetsRequestOrderByNameAsc = ListPermissionSetsRequestOrderBy("name_asc") - // Name descending + // Name descending. ListPermissionSetsRequestOrderByNameDesc = ListPermissionSetsRequestOrderBy("name_desc") - // Creation date ascending + // Creation date ascending. ListPermissionSetsRequestOrderByCreatedAtAsc = ListPermissionSetsRequestOrderBy("created_at_asc") - // Creation date descending + // Creation date descending. ListPermissionSetsRequestOrderByCreatedAtDesc = ListPermissionSetsRequestOrderBy("created_at_desc") ) @@ -284,13 +304,13 @@ func (enum *ListPermissionSetsRequestOrderBy) UnmarshalJSON(data []byte) error { type ListPoliciesRequestOrderBy string const ( - // Policy name ascending + // Policy name ascending. ListPoliciesRequestOrderByPolicyNameAsc = ListPoliciesRequestOrderBy("policy_name_asc") - // Policy name descending + // Policy name descending. ListPoliciesRequestOrderByPolicyNameDesc = ListPoliciesRequestOrderBy("policy_name_desc") - // Creation date ascending + // Creation date ascending. ListPoliciesRequestOrderByCreatedAtAsc = ListPoliciesRequestOrderBy("created_at_asc") - // Creation date descending + // Creation date descending. ListPoliciesRequestOrderByCreatedAtDesc = ListPoliciesRequestOrderBy("created_at_desc") ) @@ -320,9 +340,9 @@ func (enum *ListPoliciesRequestOrderBy) UnmarshalJSON(data []byte) error { type ListQuotaRequestOrderBy string const ( - // Name ascending + // Name ascending. ListQuotaRequestOrderByNameAsc = ListQuotaRequestOrderBy("name_asc") - // Name descending + // Name descending. ListQuotaRequestOrderByNameDesc = ListQuotaRequestOrderBy("name_desc") ) @@ -352,17 +372,17 @@ func (enum *ListQuotaRequestOrderBy) UnmarshalJSON(data []byte) error { type ListSSHKeysRequestOrderBy string const ( - // Creation date ascending + // Creation date ascending. ListSSHKeysRequestOrderByCreatedAtAsc = ListSSHKeysRequestOrderBy("created_at_asc") - // Creation date descending + // Creation date descending. ListSSHKeysRequestOrderByCreatedAtDesc = ListSSHKeysRequestOrderBy("created_at_desc") - // Update date ascending + // Update date ascending. ListSSHKeysRequestOrderByUpdatedAtAsc = ListSSHKeysRequestOrderBy("updated_at_asc") - // Update date descending + // Update date descending. 
ListSSHKeysRequestOrderByUpdatedAtDesc = ListSSHKeysRequestOrderBy("updated_at_desc") - // Name ascending + // Name ascending. ListSSHKeysRequestOrderByNameAsc = ListSSHKeysRequestOrderBy("name_asc") - // Name descending + // Name descending. ListSSHKeysRequestOrderByNameDesc = ListSSHKeysRequestOrderBy("name_desc") ) @@ -392,21 +412,21 @@ func (enum *ListSSHKeysRequestOrderBy) UnmarshalJSON(data []byte) error { type ListUsersRequestOrderBy string const ( - // Creation date ascending + // Creation date ascending. ListUsersRequestOrderByCreatedAtAsc = ListUsersRequestOrderBy("created_at_asc") - // Creation date descending + // Creation date descending. ListUsersRequestOrderByCreatedAtDesc = ListUsersRequestOrderBy("created_at_desc") - // Update date ascending + // Update date ascending. ListUsersRequestOrderByUpdatedAtAsc = ListUsersRequestOrderBy("updated_at_asc") - // Update date descending + // Update date descending. ListUsersRequestOrderByUpdatedAtDesc = ListUsersRequestOrderBy("updated_at_desc") - // Email ascending + // Email ascending. ListUsersRequestOrderByEmailAsc = ListUsersRequestOrderBy("email_asc") - // Email descending + // Email descending. ListUsersRequestOrderByEmailDesc = ListUsersRequestOrderBy("email_desc") - // Last login ascending + // Last login ascending. ListUsersRequestOrderByLastLoginAsc = ListUsersRequestOrderBy("last_login_asc") - // Last login descending + // Last login descending. ListUsersRequestOrderByLastLoginDesc = ListUsersRequestOrderBy("last_login_desc") ) @@ -433,16 +453,92 @@ func (enum *ListUsersRequestOrderBy) UnmarshalJSON(data []byte) error { return nil } +type LogAction string + +const ( + // Unknown action. + LogActionUnknownAction = LogAction("unknown_action") + // Created. + LogActionCreated = LogAction("created") + // Updated. + LogActionUpdated = LogAction("updated") + // Deleted. + LogActionDeleted = LogAction("deleted") +) + +func (enum LogAction) String() string { + if enum == "" { + // return default value if empty + return "unknown_action" + } + return string(enum) +} + +func (enum LogAction) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *LogAction) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = LogAction(LogAction(tmp).String()) + return nil +} + +type LogResourceType string + +const ( + // Unknown resource type. + LogResourceTypeUnknownResourceType = LogResourceType("unknown_resource_type") + // API Key. + LogResourceTypeAPIKey = LogResourceType("api_key") + // User. + LogResourceTypeUser = LogResourceType("user") + // Application. + LogResourceTypeApplication = LogResourceType("application") + // Group. + LogResourceTypeGroup = LogResourceType("group") + // Policy. + LogResourceTypePolicy = LogResourceType("policy") +) + +func (enum LogResourceType) String() string { + if enum == "" { + // return default value if empty + return "unknown_resource_type" + } + return string(enum) +} + +func (enum LogResourceType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *LogResourceType) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = LogResourceType(LogResourceType(tmp).String()) + return nil +} + type PermissionSetScopeType string const ( - // Unknown scope type + // Unknown scope type. 
PermissionSetScopeTypeUnknownScopeType = PermissionSetScopeType("unknown_scope_type") - // Projects + // Projects. PermissionSetScopeTypeProjects = PermissionSetScopeType("projects") - // Organization + // Organization. PermissionSetScopeTypeOrganization = PermissionSetScopeType("organization") - // Account root user + // Account root user. PermissionSetScopeTypeAccountRootUser = PermissionSetScopeType("account_root_user") ) @@ -472,11 +568,11 @@ func (enum *PermissionSetScopeType) UnmarshalJSON(data []byte) error { type UserStatus string const ( - // Unknown status + // Unknown status. UserStatusUnknownStatus = UserStatus("unknown_status") - // Invitation pending + // Invitation pending. UserStatusInvitationPending = UserStatus("invitation_pending") - // Activated + // Activated. UserStatusActivated = UserStatus("activated") ) @@ -506,11 +602,11 @@ func (enum *UserStatus) UnmarshalJSON(data []byte) error { type UserType string const ( - // Unknown type + // Unknown type. UserTypeUnknownType = UserType("unknown_type") - // Guest + // Guest. UserTypeGuest = UserType("guest") - // Owner + // Owner. UserTypeOwner = UserType("owner") ) @@ -537,30 +633,81 @@ func (enum *UserType) UnmarshalJSON(data []byte) error { return nil } +// RuleSpecs: rule specs. +type RuleSpecs struct { + // PermissionSetNames: names of permission sets bound to the rule. + PermissionSetNames *[]string `json:"permission_set_names"` + + // ProjectIDs: list of Project IDs the rule is scoped to. + // Precisely one of ProjectIDs, OrganizationID must be set. + ProjectIDs *[]string `json:"project_ids,omitempty"` + + // OrganizationID: ID of Organization the rule is scoped to. + // Precisely one of ProjectIDs, OrganizationID must be set. + OrganizationID *string `json:"organization_id,omitempty"` +} + +// JWT: jwt. +type JWT struct { + // Jti: jWT ID. + Jti string `json:"jti"` + + // IssuerID: ID of the user who issued the JWT. + IssuerID string `json:"issuer_id"` + + // AudienceID: ID of the user targeted by the JWT. + AudienceID string `json:"audience_id"` + + // CreatedAt: creation date of the JWT. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: last update date of the JWT. + UpdatedAt *time.Time `json:"updated_at"` + + // ExpiresAt: expiration date of the JWT. + ExpiresAt *time.Time `json:"expires_at"` + + // IP: IP address used during the creation of the JWT. + IP net.IP `json:"ip"` + + // UserAgent: user-agent used during the creation of the JWT. + UserAgent string `json:"user_agent"` +} + // APIKey: api key. type APIKey struct { // AccessKey: access key of the API key. AccessKey string `json:"access_key"` + // SecretKey: secret key of the API Key. SecretKey *string `json:"secret_key"` + // ApplicationID: ID of application that bears the API key. // Precisely one of ApplicationID, UserID must be set. ApplicationID *string `json:"application_id,omitempty"` + // UserID: ID of user that bears the API key. // Precisely one of ApplicationID, UserID must be set. UserID *string `json:"user_id,omitempty"` + // Description: description of API key. Description string `json:"description"` + // CreatedAt: date and time of API key creation. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date and time of last API key update. UpdatedAt *time.Time `json:"updated_at"` + // ExpiresAt: date and time of API key expiration. ExpiresAt *time.Time `json:"expires_at"` + // DefaultProjectID: default Project ID specified for this API key. 
DefaultProjectID string `json:"default_project_id"` + // Editable: defines whether or not the API key is editable. Editable bool `json:"editable"` + // CreationIP: IP address of the device that created the API key. CreationIP string `json:"creation_ip"` } @@ -569,151 +716,109 @@ type APIKey struct { type Application struct { // ID: ID of the application. ID string `json:"id"` + // Name: name of the application. Name string `json:"name"` + // Description: description of the application. Description string `json:"description"` + // CreatedAt: date and time application was created. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date and time of last application update. UpdatedAt *time.Time `json:"updated_at"` + // OrganizationID: ID of the Organization. OrganizationID string `json:"organization_id"` + // Editable: defines whether or not the application is editable. Editable bool `json:"editable"` + // NbAPIKeys: number of API keys attributed to the application. NbAPIKeys uint32 `json:"nb_api_keys"` + + // Tags: tags associated with the user. + Tags []string `json:"tags"` } // Group: group. type Group struct { // ID: ID of the group. ID string `json:"id"` + // CreatedAt: date and time of group creation. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date and time of last group update. UpdatedAt *time.Time `json:"updated_at"` + // OrganizationID: ID of Organization linked to the group. OrganizationID string `json:"organization_id"` + // Name: name of the group. Name string `json:"name"` + // Description: description of the group. Description string `json:"description"` + // UserIDs: iDs of users attached to this group. UserIDs []string `json:"user_ids"` + // ApplicationIDs: iDs of applications attached to this group. ApplicationIDs []string `json:"application_ids"` + + // Tags: tags associated to the group. + Tags []string `json:"tags"` } -// JWT: jwt. -type JWT struct { - // Jti: jWT ID. - Jti string `json:"jti"` - // IssuerID: ID of the user who issued the JWT. - IssuerID string `json:"issuer_id"` - // AudienceID: ID of the user targeted by the JWT. - AudienceID string `json:"audience_id"` - // CreatedAt: creation date of the JWT. +// Log: log. +type Log struct { + // ID: log ID. + ID string `json:"id"` + + // CreatedAt: creation date of the log. CreatedAt *time.Time `json:"created_at"` - // UpdatedAt: last update date of the JWT. - UpdatedAt *time.Time `json:"updated_at"` - // ExpiresAt: expiration date of the JWT. - ExpiresAt *time.Time `json:"expires_at"` - // IP: IP address used during the creation of the JWT. + + // IP: IP address of the HTTP request linked to the log. IP net.IP `json:"ip"` - // UserAgent: user-agent used during the creation of the JWT. + + // UserAgent: user-Agent of the HTTP request linked to the log. UserAgent string `json:"user_agent"` -} -// ListAPIKeysResponse: list api keys response. -type ListAPIKeysResponse struct { - // APIKeys: list of API keys. - APIKeys []*APIKey `json:"api_keys"` - // TotalCount: total count of API Keys. - TotalCount uint32 `json:"total_count"` -} + // Action: action linked to the log. + // Default value: unknown_action + Action LogAction `json:"action"` -// ListApplicationsResponse: list applications response. -type ListApplicationsResponse struct { - // Applications: list of applications. - Applications []*Application `json:"applications"` - // TotalCount: total count of applications. - TotalCount uint32 `json:"total_count"` -} + // BearerID: ID of the principal at the origin of the log. 
+ BearerID string `json:"bearer_id"` -// ListGroupsResponse: list groups response. -type ListGroupsResponse struct { - // Groups: list of groups. - Groups []*Group `json:"groups"` - // TotalCount: total count of groups. - TotalCount uint32 `json:"total_count"` -} + // OrganizationID: ID of Organization linked to the log. + OrganizationID string `json:"organization_id"` -type ListJWTsResponse struct { - Jwts []*JWT `json:"jwts"` + // ResourceType: type of the resource linked to the log. + // Default value: unknown_resource_type + ResourceType LogResourceType `json:"resource_type"` - TotalCount uint64 `json:"total_count"` -} - -// ListPermissionSetsResponse: list permission sets response. -type ListPermissionSetsResponse struct { - // PermissionSets: list of permission sets. - PermissionSets []*PermissionSet `json:"permission_sets"` - // TotalCount: total count of permission sets. - TotalCount uint32 `json:"total_count"` -} - -// ListPoliciesResponse: list policies response. -type ListPoliciesResponse struct { - // Policies: list of policies. - Policies []*Policy `json:"policies"` - // TotalCount: total count of policies. - TotalCount uint32 `json:"total_count"` -} - -// ListQuotaResponse: list quota response. -type ListQuotaResponse struct { - // Quota: list of quota. - Quota []*Quotum `json:"quota"` - // TotalCount: total count of quota. - TotalCount uint64 `json:"total_count"` -} - -// ListRulesResponse: list rules response. -type ListRulesResponse struct { - // Rules: rules of the policy. - Rules []*Rule `json:"rules"` - // TotalCount: total count of rules. - TotalCount uint32 `json:"total_count"` -} - -// ListSSHKeysResponse: list ssh keys response. -type ListSSHKeysResponse struct { - // SSHKeys: list of SSH keys. - SSHKeys []*SSHKey `json:"ssh_keys"` - // TotalCount: total count of SSH keys. - TotalCount uint32 `json:"total_count"` -} - -// ListUsersResponse: list users response. -type ListUsersResponse struct { - // Users: list of users. - Users []*User `json:"users"` - // TotalCount: total count of users. - TotalCount uint32 `json:"total_count"` + // ResourceID: ID of the resource linked to the log. + ResourceID string `json:"resource_id"` } // PermissionSet: permission set. type PermissionSet struct { // ID: id of the permission set. ID string `json:"id"` + // Name: name of the permission set. Name string `json:"name"` + // ScopeType: scope of the permission set. // Default value: unknown_scope_type ScopeType PermissionSetScopeType `json:"scope_type"` + // Description: description of the permission set. Description string `json:"description"` + // Categories: categories of the permission set. Categories *[]string `json:"categories"` } @@ -722,35 +827,51 @@ type PermissionSet struct { type Policy struct { // ID: id of the policy. ID string `json:"id"` + // Name: name of the policy. Name string `json:"name"` + // Description: description of the policy. Description string `json:"description"` + // OrganizationID: organization ID of the policy. OrganizationID string `json:"organization_id"` + // CreatedAt: date and time of policy creation. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date and time of last policy update. UpdatedAt *time.Time `json:"updated_at"` + // Editable: defines whether or not a policy is editable. Editable bool `json:"editable"` + // NbRules: number of rules of the policy. NbRules uint32 `json:"nb_rules"` + // NbScopes: number of policy scopes. NbScopes uint32 `json:"nb_scopes"` + // NbPermissionSets: number of permission sets of the policy. 
NbPermissionSets uint32 `json:"nb_permission_sets"` + + // Tags: tags associated with the policy. + Tags []string `json:"tags"` + // UserID: ID of the user attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. UserID *string `json:"user_id,omitempty"` + // GroupID: ID of the group attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. GroupID *string `json:"group_id,omitempty"` + // ApplicationID: ID of the application attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. ApplicationID *string `json:"application_id,omitempty"` + // NoPrincipal: defines whether or not a policy is attributed to a principal. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. NoPrincipal *bool `json:"no_principal,omitempty"` } @@ -758,9 +879,11 @@ type Policy struct { type Quotum struct { // Name: name of the quota. Name string `json:"name"` + // Limit: maximum limit of the quota. // Precisely one of Limit, Unlimited must be set. Limit *uint64 `json:"limit,omitempty"` + // Unlimited: defines whether or not the quota is unlimited. // Precisely one of Limit, Unlimited must be set. Unlimited *bool `json:"unlimited,omitempty"` @@ -770,1885 +893,388 @@ type Quotum struct { type Rule struct { // ID: id of rule. ID string `json:"id"` + // PermissionSetNames: names of permission sets bound to the rule. PermissionSetNames *[]string `json:"permission_set_names"` + // PermissionSetsScopeType: permission_set_names have the same scope_type. // Default value: unknown_scope_type PermissionSetsScopeType PermissionSetScopeType `json:"permission_sets_scope_type"` - // ProjectIDs: list of Project IDs the rule is scoped to. - // Precisely one of AccountRootUserID, OrganizationID, ProjectIDs must be set. - ProjectIDs *[]string `json:"project_ids,omitempty"` - // OrganizationID: ID of Organization the rule is scoped to. - // Precisely one of AccountRootUserID, OrganizationID, ProjectIDs must be set. - OrganizationID *string `json:"organization_id,omitempty"` - // AccountRootUserID: ID of account root user the rule is scoped to. - // Precisely one of AccountRootUserID, OrganizationID, ProjectIDs must be set. - AccountRootUserID *string `json:"account_root_user_id,omitempty"` -} -// RuleSpecs: rule specs. -type RuleSpecs struct { - // PermissionSetNames: names of permission sets bound to the rule. - PermissionSetNames *[]string `json:"permission_set_names"` // ProjectIDs: list of Project IDs the rule is scoped to. - // Precisely one of OrganizationID, ProjectIDs must be set. + // Precisely one of ProjectIDs, OrganizationID, AccountRootUserID must be set. ProjectIDs *[]string `json:"project_ids,omitempty"` + // OrganizationID: ID of Organization the rule is scoped to. - // Precisely one of OrganizationID, ProjectIDs must be set. + // Precisely one of ProjectIDs, OrganizationID, AccountRootUserID must be set. OrganizationID *string `json:"organization_id,omitempty"` + + // AccountRootUserID: ID of account root user the rule is scoped to. + // Precisely one of ProjectIDs, OrganizationID, AccountRootUserID must be set. 
+ AccountRootUserID *string `json:"account_root_user_id,omitempty"` } // SSHKey: ssh key. type SSHKey struct { // ID: ID of SSH key. ID string `json:"id"` + // Name: name of SSH key. Name string `json:"name"` + // PublicKey: public key of SSH key. PublicKey string `json:"public_key"` + // Fingerprint: fingerprint of the SSH key. Fingerprint string `json:"fingerprint"` + // CreatedAt: creation date of SSH key. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: last update date of SSH key. UpdatedAt *time.Time `json:"updated_at"` + // OrganizationID: ID of Organization linked to the SSH key. OrganizationID string `json:"organization_id"` + // ProjectID: ID of Project linked to the SSH key. ProjectID string `json:"project_id"` + // Disabled: SSH key status. Disabled bool `json:"disabled"` } -// SetRulesResponse: set rules response. -type SetRulesResponse struct { - // Rules: rules of the policy. - Rules []*Rule `json:"rules"` -} - // User: user. type User struct { // ID: ID of user. ID string `json:"id"` + // Email: email of user. Email string `json:"email"` + // CreatedAt: date user was created. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date of last user update. UpdatedAt *time.Time `json:"updated_at"` + // OrganizationID: ID of the Organization. OrganizationID string `json:"organization_id"` + // Deletable: deletion status of user. Owners cannot be deleted. Deletable bool `json:"deletable"` + // LastLoginAt: date of the last login. LastLoginAt *time.Time `json:"last_login_at"` + // Type: type of user. // Default value: unknown_type Type UserType `json:"type"` + // Deprecated: TwoFactorEnabled: deprecated, use "mfa" instead. TwoFactorEnabled *bool `json:"two_factor_enabled,omitempty"` + // Status: status of user invitation. // Default value: unknown_status Status UserStatus `json:"status"` + // Mfa: defines whether MFA is enabled. Mfa bool `json:"mfa"` + // AccountRootUserID: ID of the account root user associated with the user. AccountRootUserID string `json:"account_root_user_id"` + + // Tags: tags associated with the user. + Tags []string `json:"tags"` } -// Service API - -type ListSSHKeysRequest struct { - // OrderBy: sort order of the SSH keys. - // Default value: created_at_asc - OrderBy ListSSHKeysRequestOrderBy `json:"-"` - // Page: requested page number. Value must be greater or equal to 1. - // Default value: 1 - Page *int32 `json:"-"` - // PageSize: number of items per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // OrganizationID: filter by Organization ID. - OrganizationID *string `json:"-"` - // Name: name of group to find. - Name *string `json:"-"` - // ProjectID: filter by Project ID. - ProjectID *string `json:"-"` - // Disabled: defines whether to include disabled SSH keys or not. - Disabled *bool `json:"-"` -} - -// ListSSHKeys: list SSH keys. -// List SSH keys. By default, the SSH keys listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You can define additional parameters for your query such as `organization_id`, `name`, `project_id` and `disabled`. 
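// Illustrative usage sketch for the ListSSHKeys call documented above. It assumes the upstream
// package path github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1 and the scw client helpers
// (scw.NewClient, scw.WithAuth, scw.StringPtr, scw.BoolPtr); the credentials and Project ID
// below are placeholders, not values taken from this change.
package main

import (
	"fmt"
	"log"

	iam "github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// Build a client with placeholder credentials.
	client, err := scw.NewClient(
		scw.WithAuth("SCWXXXXXXXXXXXXXXXXX", "example-secret-key"),
	)
	if err != nil {
		log.Fatal(err)
	}
	api := iam.NewAPI(client)

	// List the enabled SSH keys of one Project, ordered by creation date (the default order).
	resp, err := api.ListSSHKeys(&iam.ListSSHKeysRequest{
		ProjectID: scw.StringPtr("11111111-1111-1111-1111-111111111111"), // placeholder Project ID
		Disabled:  scw.BoolPtr(false),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, key := range resp.SSHKeys {
		fmt.Println(key.ID, key.Name, key.Fingerprint)
	}
}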
-func (s *API) ListSSHKeys(req *ListSSHKeysRequest, opts ...scw.RequestOption) (*ListSSHKeysResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - parameter.AddToQuery(query, "name", req.Name) - parameter.AddToQuery(query, "project_id", req.ProjectID) - parameter.AddToQuery(query, "disabled", req.Disabled) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/ssh-keys", - Query: query, - Headers: http.Header{}, - } - - var resp ListSSHKeysResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type CreateSSHKeyRequest struct { - // Name: name of the SSH key. Max length is 1000. - Name string `json:"name"` - // PublicKey: SSH public key. Currently only the ssh-rsa, ssh-dss (DSA), ssh-ed25519 and ecdsa keys with NIST curves are supported. Max length is 65000. - PublicKey string `json:"public_key"` - // ProjectID: project the resource is attributed to. - ProjectID string `json:"project_id"` -} - -// CreateSSHKey: create an SSH key. -// Add a new SSH key to a Scaleway Project. You must specify the `name`, `public_key` and `project_id`. -func (s *API) CreateSSHKey(req *CreateSSHKeyRequest, opts ...scw.RequestOption) (*SSHKey, error) { - var err error - - if req.ProjectID == "" { - defaultProjectID, _ := s.client.GetDefaultProjectID() - req.ProjectID = defaultProjectID - } - - if req.Name == "" { - req.Name = namegenerator.GetRandomName("key") - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/ssh-keys", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp SSHKey - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type GetSSHKeyRequest struct { - // SSHKeyID: ID of the SSH key. - SSHKeyID string `json:"-"` -} - -// GetSSHKey: get an SSH key. -// Retrieve information about a given SSH key, specified by the `ssh_key_id` parameter. The SSH key's full details, including `id`, `name`, `public_key`, and `project_id` are returned in the response. -func (s *API) GetSSHKey(req *GetSSHKeyRequest, opts ...scw.RequestOption) (*SSHKey, error) { - var err error - - if fmt.Sprint(req.SSHKeyID) == "" { - return nil, errors.New("field SSHKeyID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/ssh-keys/" + fmt.Sprint(req.SSHKeyID) + "", - Headers: http.Header{}, - } - - var resp SSHKey - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type UpdateSSHKeyRequest struct { - SSHKeyID string `json:"-"` - // Name: name of the SSH key. Max length is 1000. - Name *string `json:"name"` - // Disabled: enable or disable the SSH key. - Disabled *bool `json:"disabled"` -} - -// UpdateSSHKey: update an SSH key. -// Update the parameters of an SSH key, including `name` and `disable`. 
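// Illustrative sketch of UpdateSSHKey as documented above: disabling a key without deleting it.
// Assumes the imports from the earlier sketch (iam "github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1",
// "github.com/scaleway/scaleway-sdk-go/scw", "fmt") and an *iam.API built with iam.NewAPI; the key ID is a placeholder.
func disableSSHKey(api *iam.API, sshKeyID string) error {
	key, err := api.UpdateSSHKey(&iam.UpdateSSHKeyRequest{
		SSHKeyID: sshKeyID,
		Disabled: scw.BoolPtr(true), // Name could be updated in the same PATCH via a *string
	})
	if err != nil {
		return err
	}
	fmt.Printf("SSH key %s disabled=%v\n", key.ID, key.Disabled)
	return nil
}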
-func (s *API) UpdateSSHKey(req *UpdateSSHKeyRequest, opts ...scw.RequestOption) (*SSHKey, error) { - var err error - - if fmt.Sprint(req.SSHKeyID) == "" { - return nil, errors.New("field SSHKeyID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/iam/v1alpha1/ssh-keys/" + fmt.Sprint(req.SSHKeyID) + "", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp SSHKey - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type DeleteSSHKeyRequest struct { - SSHKeyID string `json:"-"` -} - -// DeleteSSHKey: delete an SSH key. -// Delete a given SSH key, specified by the `ssh_key_id`. Deleting an SSH is permanent, and cannot be undone. Note that you might need to update any configurations that used the SSH key. -func (s *API) DeleteSSHKey(req *DeleteSSHKeyRequest, opts ...scw.RequestOption) error { - var err error - - if fmt.Sprint(req.SSHKeyID) == "" { - return errors.New("field SSHKeyID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/iam/v1alpha1/ssh-keys/" + fmt.Sprint(req.SSHKeyID) + "", - Headers: http.Header{}, - } - - err = s.client.Do(scwReq, nil, opts...) - if err != nil { - return err - } - return nil -} - -type ListUsersRequest struct { - // OrderBy: criteria for sorting results. - // Default value: created_at_asc - OrderBy ListUsersRequestOrderBy `json:"-"` - // PageSize: number of results per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // Page: page number. Value must be greater or equal to 1. - // Default value: 1 - Page *int32 `json:"-"` - // OrganizationID: ID of the Organization to filter. - OrganizationID *string `json:"-"` - // UserIDs: filter by list of IDs. - UserIDs []string `json:"-"` - // Mfa: filter by MFA status. - Mfa *bool `json:"-"` -} - -// ListUsers: list users of an Organization. -// List the users of an Organization. By default, the users listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `organization_id` in the query path of your request. You can also define additional parameters for your query such as `user_ids`. -func (s *API) ListUsers(req *ListUsersRequest, opts ...scw.RequestOption) (*ListUsersResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - parameter.AddToQuery(query, "user_ids", req.UserIDs) - parameter.AddToQuery(query, "mfa", req.Mfa) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/users", - Query: query, - Headers: http.Header{}, - } - - var resp ListUsersResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type GetUserRequest struct { - // UserID: ID of the user to find. - UserID string `json:"-"` -} - -// GetUser: get a given user. -// Retrieve information about a user, specified by the `user_id` parameter. The user's full details, including `id`, `email`, `organization_id`, `status` and `mfa` are returned in the response. 
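// Illustrative sketch of GetUser as documented above, printing the fields called out in the doc
// comment (email, status, MFA). Same assumed imports as the earlier sketches; the user ID is a placeholder.
func printUser(api *iam.API, userID string) error {
	user, err := api.GetUser(&iam.GetUserRequest{UserID: userID})
	if err != nil {
		return err
	}
	fmt.Printf("%s %s status=%v mfa=%v\n", user.ID, user.Email, user.Status, user.Mfa)
	return nil
}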
-func (s *API) GetUser(req *GetUserRequest, opts ...scw.RequestOption) (*User, error) { - var err error - - if fmt.Sprint(req.UserID) == "" { - return nil, errors.New("field UserID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/users/" + fmt.Sprint(req.UserID) + "", - Headers: http.Header{}, - } - - var resp User - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type DeleteUserRequest struct { - // UserID: ID of the user to delete. - UserID string `json:"-"` -} - -// DeleteUser: delete a guest user from an Organization. -// Remove a user from an Organization in which they are a guest. You must define the `user_id` in your request. Note that removing a user from an Organization automatically deletes their API keys, and any policies directly attached to them become orphaned. -func (s *API) DeleteUser(req *DeleteUserRequest, opts ...scw.RequestOption) error { - var err error - - if fmt.Sprint(req.UserID) == "" { - return errors.New("field UserID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/iam/v1alpha1/users/" + fmt.Sprint(req.UserID) + "", - Headers: http.Header{}, - } - - err = s.client.Do(scwReq, nil, opts...) - if err != nil { - return err - } - return nil -} - -type CreateUserRequest struct { - // OrganizationID: ID of the Organization. - OrganizationID string `json:"organization_id"` - // Email: email of the user. - Email string `json:"email"` -} - -// CreateUser: create a new user. -// Create a new user. You must define the `organization_id` and the `email` in your request. -func (s *API) CreateUser(req *CreateUserRequest, opts ...scw.RequestOption) (*User, error) { - var err error - - if req.OrganizationID == "" { - defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() - req.OrganizationID = defaultOrganizationID - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/users", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp User - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type ListApplicationsRequest struct { - // OrderBy: criteria for sorting results. - // Default value: created_at_asc - OrderBy ListApplicationsRequestOrderBy `json:"-"` - // PageSize: number of results per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // Page: page number. Value must be greater than 1. - // Default value: 1 - Page *int32 `json:"-"` - // Name: name of the application to filter. - Name *string `json:"-"` - // OrganizationID: ID of the Organization to filter. - OrganizationID *string `json:"-"` - // Editable: defines whether to filter out editable applications or not. - Editable *bool `json:"-"` - // ApplicationIDs: filter by list of IDs. - ApplicationIDs []string `json:"-"` -} - -// ListApplications: list applications of an Organization. -// List the applications of an Organization. By default, the applications listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `organization_id` in the query path of your request. You can also define additional parameters for your query such as `application_ids`. 
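// Illustrative sketch of ListApplications as documented above. Same assumed imports as the earlier
// sketches; scw.WithAllPages is assumed to be the SDK's pagination request option, and the
// Organization ID is passed in explicitly as the doc comment requires.
func listApplicationNames(api *iam.API, organizationID string) ([]string, error) {
	resp, err := api.ListApplications(&iam.ListApplicationsRequest{
		OrganizationID: scw.StringPtr(organizationID),
	}, scw.WithAllPages())
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(resp.Applications))
	for _, app := range resp.Applications {
		names = append(names, app.Name)
	}
	return names, nil
}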
-func (s *API) ListApplications(req *ListApplicationsRequest, opts ...scw.RequestOption) (*ListApplicationsResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "name", req.Name) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - parameter.AddToQuery(query, "editable", req.Editable) - parameter.AddToQuery(query, "application_ids", req.ApplicationIDs) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/applications", - Query: query, - Headers: http.Header{}, - } - - var resp ListApplicationsResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type CreateApplicationRequest struct { - // Name: name of the application to create (max length is 64 characters). - Name string `json:"name"` - // OrganizationID: ID of the Organization. - OrganizationID string `json:"organization_id"` - // Description: description of the application (max length is 200 characters). - Description string `json:"description"` -} - -// CreateApplication: create a new application. -// Create a new application. You must define the `name` parameter in the request. -func (s *API) CreateApplication(req *CreateApplicationRequest, opts ...scw.RequestOption) (*Application, error) { - var err error - - if req.OrganizationID == "" { - defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() - req.OrganizationID = defaultOrganizationID - } - - if req.Name == "" { - req.Name = namegenerator.GetRandomName("app") - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/applications", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Application - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type GetApplicationRequest struct { - // ApplicationID: ID of the application to find. - ApplicationID string `json:"-"` -} - -// GetApplication: get a given application. -// Retrieve information about an application, specified by the `application_id` parameter. The application's full details, including `id`, `email`, `organization_id`, `status` and `two_factor_enabled` are returned in the response. -func (s *API) GetApplication(req *GetApplicationRequest, opts ...scw.RequestOption) (*Application, error) { - var err error - - if fmt.Sprint(req.ApplicationID) == "" { - return nil, errors.New("field ApplicationID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/applications/" + fmt.Sprint(req.ApplicationID) + "", - Headers: http.Header{}, - } - - var resp Application - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type UpdateApplicationRequest struct { - // ApplicationID: ID of the application to update. - ApplicationID string `json:"-"` - // Name: new name for the application (max length is 64 chars). - Name *string `json:"name"` - // Description: new description for the application (max length is 200 chars). - Description *string `json:"description"` -} - -// UpdateApplication: update an application. 
-// Update the parameters of an application, including `name` and `description`. -func (s *API) UpdateApplication(req *UpdateApplicationRequest, opts ...scw.RequestOption) (*Application, error) { - var err error - - if fmt.Sprint(req.ApplicationID) == "" { - return nil, errors.New("field ApplicationID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/iam/v1alpha1/applications/" + fmt.Sprint(req.ApplicationID) + "", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Application - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type DeleteApplicationRequest struct { - // ApplicationID: ID of the application to delete. - ApplicationID string `json:"-"` -} - -// DeleteApplication: delete an application. -// Delete an application. Note that this action is irreversible and will automatically delete the application's API keys. Policies attached to users and applications via this group will no longer apply. -func (s *API) DeleteApplication(req *DeleteApplicationRequest, opts ...scw.RequestOption) error { - var err error - - if fmt.Sprint(req.ApplicationID) == "" { - return errors.New("field ApplicationID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/iam/v1alpha1/applications/" + fmt.Sprint(req.ApplicationID) + "", - Headers: http.Header{}, - } - - err = s.client.Do(scwReq, nil, opts...) - if err != nil { - return err - } - return nil -} - -type ListGroupsRequest struct { - // OrderBy: sort order of groups. - // Default value: created_at_asc - OrderBy ListGroupsRequestOrderBy `json:"-"` - // Page: requested page number. Value must be greater or equal to 1. - // Default value: 1 - Page *int32 `json:"-"` - // PageSize: number of items per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // OrganizationID: filter by Organization ID. - OrganizationID *string `json:"-"` - // Name: name of group to find. - Name *string `json:"-"` - // ApplicationIDs: filter by a list of application IDs. - ApplicationIDs []string `json:"-"` - // UserIDs: filter by a list of user IDs. - UserIDs []string `json:"-"` - // GroupIDs: filter by a list of group IDs. - GroupIDs []string `json:"-"` -} - -// ListGroups: list groups. -// List groups. By default, the groups listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You can define additional parameters to filter your query. Use `user_ids` or `application_ids` to list all groups certain users or applications belong to. 
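// Illustrative sketch of ListGroups as documented above, using the `user_ids` filter to find all
// groups a given user belongs to. Same assumed imports as the earlier sketches; the user ID is a placeholder.
func groupsForUser(api *iam.API, userID string) ([]*iam.Group, error) {
	resp, err := api.ListGroups(&iam.ListGroupsRequest{
		UserIDs: []string{userID},
	})
	if err != nil {
		return nil, err
	}
	return resp.Groups, nil
}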
-func (s *API) ListGroups(req *ListGroupsRequest, opts ...scw.RequestOption) (*ListGroupsResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - parameter.AddToQuery(query, "name", req.Name) - parameter.AddToQuery(query, "application_ids", req.ApplicationIDs) - parameter.AddToQuery(query, "user_ids", req.UserIDs) - parameter.AddToQuery(query, "group_ids", req.GroupIDs) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/groups", - Query: query, - Headers: http.Header{}, - } - - var resp ListGroupsResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type CreateGroupRequest struct { - // OrganizationID: ID of Organization linked to the group. - OrganizationID string `json:"organization_id"` - // Name: name of the group to create (max length is 64 chars). MUST be unique inside an Organization. - Name string `json:"name"` - // Description: description of the group to create (max length is 200 chars). - Description string `json:"description"` -} - -// CreateGroup: create a group. -// Create a new group. You must define the `name` and `organization_id` parameters in the request. -func (s *API) CreateGroup(req *CreateGroupRequest, opts ...scw.RequestOption) (*Group, error) { - var err error - - if req.OrganizationID == "" { - defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() - req.OrganizationID = defaultOrganizationID - } - - if req.Name == "" { - req.Name = namegenerator.GetRandomName("grp") - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/groups", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Group - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type GetGroupRequest struct { - // GroupID: ID of the group. - GroupID string `json:"-"` -} - -// GetGroup: get a group. -// Retrive information about a given group, specified by the `group_id` parameter. The group's full details, including `user_ids` and `application_ids` are returned in the response. -func (s *API) GetGroup(req *GetGroupRequest, opts ...scw.RequestOption) (*Group, error) { - var err error - - if fmt.Sprint(req.GroupID) == "" { - return nil, errors.New("field GroupID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "", - Headers: http.Header{}, - } - - var resp Group - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type UpdateGroupRequest struct { - // GroupID: ID of the group to update. - GroupID string `json:"-"` - // Name: new name for the group (max length is 64 chars). MUST be unique inside an Organization. - Name *string `json:"name"` - // Description: new description for the group (max length is 200 chars). - Description *string `json:"description"` -} - -// UpdateGroup: update a group. -// Update the parameters of group, including `name` and `description`. 
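// Illustrative sketch of UpdateGroup as documented above: renaming a group and refreshing its
// description in one PATCH. Same assumed imports; the group ID and strings are placeholders.
func renameGroup(api *iam.API, groupID, newName string) (*iam.Group, error) {
	return api.UpdateGroup(&iam.UpdateGroupRequest{
		GroupID:     groupID,
		Name:        scw.StringPtr(newName),
		Description: scw.StringPtr("renamed group"),
	})
}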
-func (s *API) UpdateGroup(req *UpdateGroupRequest, opts ...scw.RequestOption) (*Group, error) { - var err error - - if fmt.Sprint(req.GroupID) == "" { - return nil, errors.New("field GroupID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Group - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type SetGroupMembersRequest struct { - GroupID string `json:"-"` - - UserIDs []string `json:"user_ids"` - - ApplicationIDs []string `json:"application_ids"` -} - -// SetGroupMembers: overwrite users and applications of a group. -// Overwrite users and applications configuration in a group. Any information that you add using this command will overwrite the previous configuration. -func (s *API) SetGroupMembers(req *SetGroupMembersRequest, opts ...scw.RequestOption) (*Group, error) { - var err error - - if fmt.Sprint(req.GroupID) == "" { - return nil, errors.New("field GroupID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "/members", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Group - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - +// AddGroupMemberRequest: add group member request. type AddGroupMemberRequest struct { // GroupID: ID of the group. GroupID string `json:"-"` + // UserID: ID of the user to add. - // Precisely one of ApplicationID, UserID must be set. + // Precisely one of UserID, ApplicationID must be set. UserID *string `json:"user_id,omitempty"` + // ApplicationID: ID of the application to add. - // Precisely one of ApplicationID, UserID must be set. + // Precisely one of UserID, ApplicationID must be set. ApplicationID *string `json:"application_id,omitempty"` } -// AddGroupMember: add a user or an application to a group. -// Add a user or an application to a group. You can specify a `user_id` and and `application_id` in the body of your request. Note that you can only add one of each per request. -func (s *API) AddGroupMember(req *AddGroupMemberRequest, opts ...scw.RequestOption) (*Group, error) { - var err error - - if fmt.Sprint(req.GroupID) == "" { - return nil, errors.New("field GroupID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "/add-member", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Group - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - +// AddGroupMembersRequest: add group members request. type AddGroupMembersRequest struct { // GroupID: ID of the group. GroupID string `json:"-"` + // UserIDs: iDs of the users to add. UserIDs []string `json:"user_ids"` + // ApplicationIDs: iDs of the applications to add. ApplicationIDs []string `json:"application_ids"` } -// AddGroupMembers: add multiple users and applications to a group. -// Add multiple users and applications to a group in a single call. You can specify an array of `user_id`s and `application_id`s. Note that any existing users and applications in the group will remain. 
To add new users/applications and delete pre-existing ones, use the [Overwrite users and applications of a group](#path-groups-overwrite-users-and-applications-of-a-group) method. -func (s *API) AddGroupMembers(req *AddGroupMembersRequest, opts ...scw.RequestOption) (*Group, error) { - var err error - - if fmt.Sprint(req.GroupID) == "" { - return nil, errors.New("field GroupID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "/add-members", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Group - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type RemoveGroupMemberRequest struct { - // GroupID: ID of the group. - GroupID string `json:"-"` - // UserID: ID of the user to remove. - // Precisely one of ApplicationID, UserID must be set. - UserID *string `json:"user_id,omitempty"` - // ApplicationID: ID of the application to remove. - // Precisely one of ApplicationID, UserID must be set. - ApplicationID *string `json:"application_id,omitempty"` -} - -// RemoveGroupMember: remove a user or an application from a group. -// Remove a user or an application from a group. You can specify a `user_id` and and `application_id` in the body of your request. Note that you can only remove one of each per request. Removing a user from a group means that any permissions given to them via the group (i.e. from an attached policy) will no longer apply. Be sure you want to remove these permissions from the user before proceeding. -func (s *API) RemoveGroupMember(req *RemoveGroupMemberRequest, opts ...scw.RequestOption) (*Group, error) { - var err error - - if fmt.Sprint(req.GroupID) == "" { - return nil, errors.New("field GroupID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "/remove-member", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Group - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type DeleteGroupRequest struct { - // GroupID: ID of the group to delete. - GroupID string `json:"-"` -} - -// DeleteGroup: delete a group. -// Delete a group. Note that this action is irreversible and could delete permissions for group members. Policies attached to users and applications via this group will no longer apply. -func (s *API) DeleteGroup(req *DeleteGroupRequest, opts ...scw.RequestOption) error { - var err error - - if fmt.Sprint(req.GroupID) == "" { - return errors.New("field GroupID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "", - Headers: http.Header{}, - } - - err = s.client.Do(scwReq, nil, opts...) - if err != nil { - return err - } - return nil -} - -type ListPoliciesRequest struct { - // OrderBy: criteria for sorting results. - // Default value: created_at_asc - OrderBy ListPoliciesRequestOrderBy `json:"-"` - // PageSize: number of results per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // Page: page number. Value must be greater than 1. - // Default value: 1 - Page *int32 `json:"-"` - // OrganizationID: ID of the Organization to filter. 
- OrganizationID *string `json:"-"` - // Editable: defines whether or not filter out editable policies. - Editable *bool `json:"-"` - // UserIDs: defines whether or not to filter by list of user IDs. - UserIDs []string `json:"-"` - // GroupIDs: defines whether or not to filter by list of group IDs. - GroupIDs []string `json:"-"` - // ApplicationIDs: filter by a list of application IDs. - ApplicationIDs []string `json:"-"` - // NoPrincipal: defines whether or not the policy is attributed to a principal. - NoPrincipal *bool `json:"-"` - // PolicyName: name of the policy to fetch. - PolicyName *string `json:"-"` -} - -// ListPolicies: list policies of an Organization. -// List the policies of an Organization. By default, the policies listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `organization_id` in the query path of your request. You can also define additional parameters to filter your query, such as `user_ids`, `groups_ids`, `application_ids`, and `policy_name`. -func (s *API) ListPolicies(req *ListPoliciesRequest, opts ...scw.RequestOption) (*ListPoliciesResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - parameter.AddToQuery(query, "editable", req.Editable) - parameter.AddToQuery(query, "user_ids", req.UserIDs) - parameter.AddToQuery(query, "group_ids", req.GroupIDs) - parameter.AddToQuery(query, "application_ids", req.ApplicationIDs) - parameter.AddToQuery(query, "no_principal", req.NoPrincipal) - parameter.AddToQuery(query, "policy_name", req.PolicyName) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/policies", - Query: query, - Headers: http.Header{}, - } - - var resp ListPoliciesResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type CreatePolicyRequest struct { - // Name: name of the policy to create (max length is 64 characters). - Name string `json:"name"` - // Description: description of the policy to create (max length is 200 characters). - Description string `json:"description"` - // OrganizationID: ID of the Organization. - OrganizationID string `json:"organization_id"` - // Rules: rules of the policy to create. - Rules []*RuleSpecs `json:"rules"` - // UserID: ID of user attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. - UserID *string `json:"user_id,omitempty"` - // GroupID: ID of group attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. - GroupID *string `json:"group_id,omitempty"` - // ApplicationID: ID of application attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. - ApplicationID *string `json:"application_id,omitempty"` - // NoPrincipal: defines whether or not a policy is attributed to a principal. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. - NoPrincipal *bool `json:"no_principal,omitempty"` -} - -// CreatePolicy: create a new policy. -// Create a new application. 
You must define the `name` parameter in the request. You can specify parameters such as `user_id`, `groups_id`, `application_id`, `no_principal`, `rules` and its child attributes. -func (s *API) CreatePolicy(req *CreatePolicyRequest, opts ...scw.RequestOption) (*Policy, error) { - var err error - - if req.OrganizationID == "" { - defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() - req.OrganizationID = defaultOrganizationID - } - - if req.Name == "" { - req.Name = namegenerator.GetRandomName("pol") - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/policies", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Policy - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type GetPolicyRequest struct { - // PolicyID: id of policy to search. - PolicyID string `json:"-"` -} - -// GetPolicy: get an existing policy. -// Retrieve information about a policy, speficified by the `policy_id` parameter. The policy's full details, including `id`, `name`, `organization_id`, `nb_rules` and `nb_scopes`, `nb_permission_sets` are returned in the response. -func (s *API) GetPolicy(req *GetPolicyRequest, opts ...scw.RequestOption) (*Policy, error) { - var err error - - if fmt.Sprint(req.PolicyID) == "" { - return nil, errors.New("field PolicyID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/policies/" + fmt.Sprint(req.PolicyID) + "", - Headers: http.Header{}, - } - - var resp Policy - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type UpdatePolicyRequest struct { - // PolicyID: id of policy to update. - PolicyID string `json:"-"` - // Name: new name for the policy (max length is 64 characters). - Name *string `json:"name"` - // Description: new description of policy (max length is 200 characters). - Description *string `json:"description"` - // UserID: new ID of user attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. - UserID *string `json:"user_id,omitempty"` - // GroupID: new ID of group attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. - GroupID *string `json:"group_id,omitempty"` - // ApplicationID: new ID of application attributed to the policy. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. - ApplicationID *string `json:"application_id,omitempty"` - // NoPrincipal: defines whether or not the policy is attributed to a principal. - // Precisely one of ApplicationID, GroupID, NoPrincipal, UserID must be set. - NoPrincipal *bool `json:"no_principal,omitempty"` -} - -// UpdatePolicy: update an existing policy. -// Update the parameters of a policy, including `name`, `description`, `user_id`, `group_id`, `application_id` and `no_principal`. -func (s *API) UpdatePolicy(req *UpdatePolicyRequest, opts ...scw.RequestOption) (*Policy, error) { - var err error - - if fmt.Sprint(req.PolicyID) == "" { - return nil, errors.New("field PolicyID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/iam/v1alpha1/policies/" + fmt.Sprint(req.PolicyID) + "", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Policy - - err = s.client.Do(scwReq, &resp, opts...) 
- if err != nil { - return nil, err - } - return &resp, nil -} - -type DeletePolicyRequest struct { - // PolicyID: id of policy to delete. - PolicyID string `json:"-"` -} - -// DeletePolicy: delete a policy. -// Delete a policy. You must define specify the `policy_id` parameter in your request. Note that when deleting a policy, all permissions it gives to its principal (user, group or application) will be revoked. -func (s *API) DeletePolicy(req *DeletePolicyRequest, opts ...scw.RequestOption) error { - var err error - - if fmt.Sprint(req.PolicyID) == "" { - return errors.New("field PolicyID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/iam/v1alpha1/policies/" + fmt.Sprint(req.PolicyID) + "", - Headers: http.Header{}, - } - - err = s.client.Do(scwReq, nil, opts...) - if err != nil { - return err - } - return nil -} - +// ClonePolicyRequest: clone policy request. type ClonePolicyRequest struct { PolicyID string `json:"-"` } -// ClonePolicy: clone a policy. -// Clone a policy. You must define specify the `policy_id` parameter in your request. -func (s *API) ClonePolicy(req *ClonePolicyRequest, opts ...scw.RequestOption) (*Policy, error) { - var err error - - if fmt.Sprint(req.PolicyID) == "" { - return nil, errors.New("field PolicyID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/policies/" + fmt.Sprint(req.PolicyID) + "/clone", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp Policy - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type SetRulesRequest struct { - // PolicyID: id of policy to update. - PolicyID string `json:"policy_id"` - // Rules: rules of the policy to set. - Rules []*RuleSpecs `json:"rules"` -} - -// SetRules: set rules of a given policy. -// Overwrite the rules of a given policy. Any information that you add using this command will overwrite the previous configuration. If you include some of the rules you already had in your previous configuration in your new one, but you change their order, the new order of display will apply. While policy rules are ordered, they have no impact on the access logic of IAM because rules are allow-only. -func (s *API) SetRules(req *SetRulesRequest, opts ...scw.RequestOption) (*SetRulesResponse, error) { - var err error - - scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/iam/v1alpha1/rules", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp SetRulesResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type ListRulesRequest struct { - // PolicyID: id of policy to search. - PolicyID *string `json:"-"` - // PageSize: number of results per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // Page: page number. Value must be greater than 1. - // Default value: 1 - Page *int32 `json:"-"` -} - -// ListRules: list rules of a given policy. -// List the rules of a given policy. By default, the rules listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `policy_id` in the query path of your request. 
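// Illustrative sketch of ListRules as documented above: dumping the permission sets attached to
// each rule of a policy. PermissionSetNames is a *[]string in the generated types, hence the nil
// check. Same assumed imports; the policy ID is a placeholder.
func printPolicyRules(api *iam.API, policyID string) error {
	resp, err := api.ListRules(&iam.ListRulesRequest{
		PolicyID: scw.StringPtr(policyID),
	})
	if err != nil {
		return err
	}
	for _, rule := range resp.Rules {
		if rule.PermissionSetNames != nil {
			fmt.Println(rule.ID, *rule.PermissionSetNames)
		}
	}
	return nil
}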
-func (s *API) ListRules(req *ListRulesRequest, opts ...scw.RequestOption) (*ListRulesResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "policy_id", req.PolicyID) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/rules", - Query: query, - Headers: http.Header{}, - } - - var resp ListRulesResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type ListPermissionSetsRequest struct { - // OrderBy: criteria for sorting results. - // Default value: created_at_asc - OrderBy ListPermissionSetsRequestOrderBy `json:"-"` - // PageSize: number of results per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // Page: page number. Value must be greater than 1. - // Default value: 1 - Page *int32 `json:"-"` - // OrganizationID: filter by Organization ID. - OrganizationID string `json:"-"` -} - -// ListPermissionSets: list permission sets. -// List permission sets available for given Organization. You must define the `organization_id` in the query path of your request. -func (s *API) ListPermissionSets(req *ListPermissionSetsRequest, opts ...scw.RequestOption) (*ListPermissionSetsResponse, error) { - var err error - - if req.OrganizationID == "" { - defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() - req.OrganizationID = defaultOrganizationID - } - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/permission-sets", - Query: query, - Headers: http.Header{}, - } - - var resp ListPermissionSetsResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type ListAPIKeysRequest struct { - // OrderBy: criteria for sorting results. - // Default value: created_at_asc - OrderBy ListAPIKeysRequestOrderBy `json:"-"` - // Page: page number. Value must be greater or equal to 1. - // Default value: 1 - Page *int32 `json:"-"` - // PageSize: number of results per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // OrganizationID: ID of Organization. - OrganizationID *string `json:"-"` - // Deprecated: ApplicationID: ID of application that bears the API key. - ApplicationID *string `json:"-"` - // Deprecated: UserID: ID of user that bears the API key. - UserID *string `json:"-"` - // Editable: defines whether to filter out editable API keys or not. - Editable *bool `json:"-"` - // Expired: defines whether to filter out expired API keys or not. - Expired *bool `json:"-"` - // AccessKey: filter by access key. - AccessKey *string `json:"-"` - // Description: filter by description. - Description *string `json:"-"` - // BearerID: filter by bearer ID. - BearerID *string `json:"-"` - // BearerType: filter by type of bearer. 
- // Default value: unknown_bearer_type - BearerType BearerType `json:"-"` -} - -// ListAPIKeys: list API keys. -// List API keys. By default, the API keys listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You can define additional parameters for your query such as `editable`, `expired`, `access_key` and `bearer_id`. -func (s *API) ListAPIKeys(req *ListAPIKeysRequest, opts ...scw.RequestOption) (*ListAPIKeysResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - parameter.AddToQuery(query, "application_id", req.ApplicationID) - parameter.AddToQuery(query, "user_id", req.UserID) - parameter.AddToQuery(query, "editable", req.Editable) - parameter.AddToQuery(query, "expired", req.Expired) - parameter.AddToQuery(query, "access_key", req.AccessKey) - parameter.AddToQuery(query, "description", req.Description) - parameter.AddToQuery(query, "bearer_id", req.BearerID) - parameter.AddToQuery(query, "bearer_type", req.BearerType) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/api-keys", - Query: query, - Headers: http.Header{}, - } - - var resp ListAPIKeysResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - +// CreateAPIKeyRequest: create api key request. type CreateAPIKeyRequest struct { // ApplicationID: ID of the application. // Precisely one of ApplicationID, UserID must be set. ApplicationID *string `json:"application_id,omitempty"` + // UserID: ID of the user. // Precisely one of ApplicationID, UserID must be set. UserID *string `json:"user_id,omitempty"` + // ExpiresAt: expiration date of the API key. - ExpiresAt *time.Time `json:"expires_at"` + ExpiresAt *time.Time `json:"expires_at,omitempty"` + // DefaultProjectID: default Project ID to use with Object Storage. - DefaultProjectID *string `json:"default_project_id"` + DefaultProjectID *string `json:"default_project_id,omitempty"` + // Description: description of the API key (max length is 200 characters). Description string `json:"description"` } -// CreateAPIKey: create an API key. -// Create an API key. You must specify the `application_id` or the `user_id` and the description. You can also specify the `default_project_id` which is the Project ID of your preferred Project, to use with Object Storage. The `access_key` and `secret_key` values are returned in the response. Note that he secret key is only showed once. Make sure that you copy and store both keys somewhere safe. -func (s *API) CreateAPIKey(req *CreateAPIKeyRequest, opts ...scw.RequestOption) (*APIKey, error) { - var err error +// CreateApplicationRequest: create application request. +type CreateApplicationRequest struct { + // Name: name of the application to create (max length is 64 characters). + Name string `json:"name"` - scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/iam/v1alpha1/api-keys", - Headers: http.Header{}, - } + // OrganizationID: ID of the Organization. 
+ OrganizationID string `json:"organization_id"` - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } + // Description: description of the application (max length is 200 characters). + Description string `json:"description"` - var resp APIKey - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil + // Tags: tags associated with the application (maximum of 10 tags). + Tags []string `json:"tags"` } -type GetAPIKeyRequest struct { - // AccessKey: access key to search for. - AccessKey string `json:"-"` +// CreateGroupRequest: create group request. +type CreateGroupRequest struct { + // OrganizationID: ID of Organization linked to the group. + OrganizationID string `json:"organization_id"` + + // Name: name of the group to create (max length is 64 chars). MUST be unique inside an Organization. + Name string `json:"name"` + + // Description: description of the group to create (max length is 200 chars). + Description string `json:"description"` + + // Tags: tags associated with the group (maximum of 10 tags). + Tags []string `json:"tags"` } -// GetAPIKey: get an API key. -// Retrive information about an API key, specified by the `access_key` parameter. The API key's details, including either the `user_id` or `application_id` of its bearer are returned in the response. Note that the string value for the `secret_key` is nullable, and therefore is not displayed in the response. The `secret_key` value is only displayed upon API key creation. -func (s *API) GetAPIKey(req *GetAPIKeyRequest, opts ...scw.RequestOption) (*APIKey, error) { - var err error +// CreatePolicyRequest: create policy request. +type CreatePolicyRequest struct { + // Name: name of the policy to create (max length is 64 characters). + Name string `json:"name"` - if fmt.Sprint(req.AccessKey) == "" { - return nil, errors.New("field AccessKey cannot be empty in request") - } + // Description: description of the policy to create (max length is 200 characters). + Description string `json:"description"` - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/api-keys/" + fmt.Sprint(req.AccessKey) + "", - Headers: http.Header{}, - } + // OrganizationID: ID of the Organization. + OrganizationID string `json:"organization_id"` - var resp APIKey + // Rules: rules of the policy to create. + Rules []*RuleSpecs `json:"rules"` - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil + // Tags: tags associated with the policy (maximum of 10 tags). + Tags []string `json:"tags"` + + // UserID: ID of user attributed to the policy. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. + UserID *string `json:"user_id,omitempty"` + + // GroupID: ID of group attributed to the policy. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. + GroupID *string `json:"group_id,omitempty"` + + // ApplicationID: ID of application attributed to the policy. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. + ApplicationID *string `json:"application_id,omitempty"` + + // NoPrincipal: defines whether or not a policy is attributed to a principal. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. + NoPrincipal *bool `json:"no_principal,omitempty"` } -type UpdateAPIKeyRequest struct { - // AccessKey: access key to update. - AccessKey string `json:"-"` - // DefaultProjectID: new default Project ID to set. 
- DefaultProjectID *string `json:"default_project_id"` - // Description: new description to update. - Description *string `json:"description"` +// CreateSSHKeyRequest: create ssh key request. +type CreateSSHKeyRequest struct { + // Name: name of the SSH key. Max length is 1000. + Name string `json:"name"` + + // PublicKey: SSH public key. Currently only the ssh-rsa, ssh-dss (DSA), ssh-ed25519 and ecdsa keys with NIST curves are supported. Max length is 65000. + PublicKey string `json:"public_key"` + + // ProjectID: project the resource is attributed to. + ProjectID string `json:"project_id"` } -// UpdateAPIKey: update an API key. -// Update the parameters of an API key, including `default_project_id` and `description`. -func (s *API) UpdateAPIKey(req *UpdateAPIKeyRequest, opts ...scw.RequestOption) (*APIKey, error) { - var err error +// CreateUserRequest: create user request. +type CreateUserRequest struct { + // OrganizationID: ID of the Organization. + OrganizationID string `json:"organization_id"` - if fmt.Sprint(req.AccessKey) == "" { - return nil, errors.New("field AccessKey cannot be empty in request") - } + // Email: email of the user. + Email string `json:"email"` - scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/iam/v1alpha1/api-keys/" + fmt.Sprint(req.AccessKey) + "", - Headers: http.Header{}, - } - - err = scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp APIKey - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil + // Tags: tags associated with the user. + Tags []string `json:"tags"` } +// DeleteAPIKeyRequest: delete api key request. type DeleteAPIKeyRequest struct { // AccessKey: access key to delete. AccessKey string `json:"-"` } -// DeleteAPIKey: delete an API key. -// Delete an API key. Note that this action is irreversible and cannot be undone. Make sure you update any configurations using the API keys you delete. -func (s *API) DeleteAPIKey(req *DeleteAPIKeyRequest, opts ...scw.RequestOption) error { - var err error - - if fmt.Sprint(req.AccessKey) == "" { - return errors.New("field AccessKey cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/iam/v1alpha1/api-keys/" + fmt.Sprint(req.AccessKey) + "", - Headers: http.Header{}, - } - - err = s.client.Do(scwReq, nil, opts...) - if err != nil { - return err - } - return nil +// DeleteApplicationRequest: delete application request. +type DeleteApplicationRequest struct { + // ApplicationID: ID of the application to delete. + ApplicationID string `json:"-"` } -type ListQuotaRequest struct { - // OrderBy: criteria for sorting results. - // Default value: name_asc - OrderBy ListQuotaRequestOrderBy `json:"-"` - // PageSize: number of results per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // Page: page number. Value must be greater than 1. - // Default value: 1 - Page *int32 `json:"-"` - // OrganizationID: filter by Organization ID. - OrganizationID string `json:"-"` -} - -// ListQuota: list all quotas in the Organization. -// List all product and features quota for an Organization, with their associated limits. By default, the quota listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `organization_id` in the query path of your request. 
-func (s *API) ListQuota(req *ListQuotaRequest, opts ...scw.RequestOption) (*ListQuotaResponse, error) { - var err error - - if req.OrganizationID == "" { - defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() - req.OrganizationID = defaultOrganizationID - } - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/quota", - Query: query, - Headers: http.Header{}, - } - - var resp ListQuotaResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type GetQuotumRequest struct { - // QuotumName: name of the quota to get. - QuotumName string `json:"-"` - // OrganizationID: ID of the Organization. - OrganizationID string `json:"-"` -} - -// GetQuotum: get a quota in the Organization. -// Retrieve information about a resource quota, speficified by the `quotum_name` parameter. The quota's `limit`, or whether it is unlimited, is returned in the response. -func (s *API) GetQuotum(req *GetQuotumRequest, opts ...scw.RequestOption) (*Quotum, error) { - var err error - - if req.OrganizationID == "" { - defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() - req.OrganizationID = defaultOrganizationID - } - - query := url.Values{} - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - - if fmt.Sprint(req.QuotumName) == "" { - return nil, errors.New("field QuotumName cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/quota/" + fmt.Sprint(req.QuotumName) + "", - Query: query, - Headers: http.Header{}, - } - - var resp Quotum - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type ListJWTsRequest struct { - // OrderBy: criteria for sorting results. - // Default value: created_at_asc - OrderBy ListJWTsRequestOrderBy `json:"-"` - // AudienceID: ID of the user to search. - AudienceID *string `json:"-"` - // PageSize: number of results per page. Value must be between 1 and 100. - // Default value: 20 - PageSize *uint32 `json:"-"` - // Page: page number. Value must be greater to 1. - // Default value: 1 - Page *int32 `json:"-"` - // Expired: filter out expired JWTs or not. - Expired *bool `json:"-"` -} - -// ListJWTs: list JWTs. -func (s *API) ListJWTs(req *ListJWTsRequest, opts ...scw.RequestOption) (*ListJWTsResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "audience_id", req.AudienceID) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "expired", req.Expired) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/jwts", - Query: query, - Headers: http.Header{}, - } - - var resp ListJWTsResponse - - err = s.client.Do(scwReq, &resp, opts...) 
- if err != nil { - return nil, err - } - return &resp, nil -} - -type GetJWTRequest struct { - // Jti: jWT ID of the JWT to get. - Jti string `json:"-"` -} - -// GetJWT: get a JWT. -func (s *API) GetJWT(req *GetJWTRequest, opts ...scw.RequestOption) (*JWT, error) { - var err error - - if fmt.Sprint(req.Jti) == "" { - return nil, errors.New("field Jti cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/iam/v1alpha1/jwts/" + fmt.Sprint(req.Jti) + "", - Headers: http.Header{}, - } - - var resp JWT - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil +// DeleteGroupRequest: delete group request. +type DeleteGroupRequest struct { + // GroupID: ID of the group to delete. + GroupID string `json:"-"` } +// DeleteJWTRequest: delete jwt request. type DeleteJWTRequest struct { // Jti: jWT ID of the JWT to delete. Jti string `json:"-"` } -// DeleteJWT: delete a JWT. -func (s *API) DeleteJWT(req *DeleteJWTRequest, opts ...scw.RequestOption) error { - var err error - - if fmt.Sprint(req.Jti) == "" { - return errors.New("field Jti cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/iam/v1alpha1/jwts/" + fmt.Sprint(req.Jti) + "", - Headers: http.Header{}, - } - - err = s.client.Do(scwReq, nil, opts...) - if err != nil { - return err - } - return nil +// DeletePolicyRequest: delete policy request. +type DeletePolicyRequest struct { + // PolicyID: id of policy to delete. + PolicyID string `json:"-"` } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListSSHKeysResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount +// DeleteSSHKeyRequest: delete ssh key request. +type DeleteSSHKeyRequest struct { + SSHKeyID string `json:"-"` } -// UnsafeAppend should not be used -// Internal usage only -func (r *ListSSHKeysResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListSSHKeysResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.SSHKeys = append(r.SSHKeys, results.SSHKeys...) - r.TotalCount += uint32(len(results.SSHKeys)) - return uint32(len(results.SSHKeys)), nil +// DeleteUserRequest: delete user request. +type DeleteUserRequest struct { + // UserID: ID of the user to delete. + UserID string `json:"-"` } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListUsersResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount +// GetAPIKeyRequest: get api key request. +type GetAPIKeyRequest struct { + // AccessKey: access key to search for. + AccessKey string `json:"-"` } -// UnsafeAppend should not be used -// Internal usage only -func (r *ListUsersResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListUsersResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Users = append(r.Users, results.Users...) - r.TotalCount += uint32(len(results.Users)) - return uint32(len(results.Users)), nil +// GetApplicationRequest: get application request. +type GetApplicationRequest struct { + // ApplicationID: ID of the application to find. + ApplicationID string `json:"-"` } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListApplicationsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount +// GetGroupRequest: get group request. +type GetGroupRequest struct { + // GroupID: ID of the group. 
+ GroupID string `json:"-"` } -// UnsafeAppend should not be used -// Internal usage only -func (r *ListApplicationsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListApplicationsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Applications = append(r.Applications, results.Applications...) - r.TotalCount += uint32(len(results.Applications)) - return uint32(len(results.Applications)), nil +// GetJWTRequest: get jwt request. +type GetJWTRequest struct { + // Jti: jWT ID of the JWT to get. + Jti string `json:"-"` } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListGroupsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount +// GetLogRequest: get log request. +type GetLogRequest struct { + // LogID: ID of the log. + LogID string `json:"-"` } -// UnsafeAppend should not be used -// Internal usage only -func (r *ListGroupsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListGroupsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Groups = append(r.Groups, results.Groups...) - r.TotalCount += uint32(len(results.Groups)) - return uint32(len(results.Groups)), nil +// GetPolicyRequest: get policy request. +type GetPolicyRequest struct { + // PolicyID: id of policy to search. + PolicyID string `json:"-"` } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListPoliciesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount +// GetQuotumRequest: get quotum request. +type GetQuotumRequest struct { + // QuotumName: name of the quota to get. + QuotumName string `json:"-"` + + // OrganizationID: ID of the Organization. + OrganizationID string `json:"-"` } -// UnsafeAppend should not be used -// Internal usage only -func (r *ListPoliciesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListPoliciesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Policies = append(r.Policies, results.Policies...) - r.TotalCount += uint32(len(results.Policies)) - return uint32(len(results.Policies)), nil +// GetSSHKeyRequest: get ssh key request. +type GetSSHKeyRequest struct { + // SSHKeyID: ID of the SSH key. + SSHKeyID string `json:"-"` } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListRulesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount +// GetUserRequest: get user request. +type GetUserRequest struct { + // UserID: ID of the user to find. + UserID string `json:"-"` } -// UnsafeAppend should not be used -// Internal usage only -func (r *ListRulesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListRulesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } +// ListAPIKeysRequest: list api keys request. +type ListAPIKeysRequest struct { + // OrderBy: criteria for sorting results. + // Default value: created_at_asc + OrderBy ListAPIKeysRequestOrderBy `json:"-"` - r.Rules = append(r.Rules, results.Rules...) - r.TotalCount += uint32(len(results.Rules)) - return uint32(len(results.Rules)), nil + // Page: page number. Value must be greater or equal to 1. + Page *int32 `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // OrganizationID: ID of Organization. 
+ OrganizationID *string `json:"-"` + + // Deprecated: ApplicationID: ID of application that bears the API key. + // Precisely one of ApplicationID, UserID must be set. + ApplicationID *string `json:"application_id,omitempty"` + + // Deprecated: UserID: ID of user that bears the API key. + // Precisely one of ApplicationID, UserID must be set. + UserID *string `json:"user_id,omitempty"` + + // Editable: defines whether to filter out editable API keys or not. + Editable *bool `json:"-"` + + // Expired: defines whether to filter out expired API keys or not. + Expired *bool `json:"-"` + + // AccessKey: filter by access key. + AccessKey *string `json:"-"` + + // Description: filter by description. + Description *string `json:"-"` + + // BearerID: filter by bearer ID. + BearerID *string `json:"-"` + + // BearerType: filter by type of bearer. + // Default value: unknown_bearer_type + BearerType BearerType `json:"-"` } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListPermissionSetsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} +// ListAPIKeysResponse: list api keys response. +type ListAPIKeysResponse struct { + // APIKeys: list of API keys. + APIKeys []*APIKey `json:"api_keys"` -// UnsafeAppend should not be used -// Internal usage only -func (r *ListPermissionSetsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListPermissionSetsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.PermissionSets = append(r.PermissionSets, results.PermissionSets...) - r.TotalCount += uint32(len(results.PermissionSets)) - return uint32(len(results.PermissionSets)), nil + // TotalCount: total count of API Keys. + TotalCount uint32 `json:"total_count"` } // UnsafeGetTotalCount should not be used @@ -2670,23 +1296,145 @@ func (r *ListAPIKeysResponse) UnsafeAppend(res interface{}) (uint32, error) { return uint32(len(results.APIKeys)), nil } +// ListApplicationsRequest: list applications request. +type ListApplicationsRequest struct { + // OrderBy: criteria for sorting results. + // Default value: created_at_asc + OrderBy ListApplicationsRequestOrderBy `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // Page: page number. Value must be greater than 1. + Page *int32 `json:"-"` + + // Name: name of the application to filter. + Name *string `json:"-"` + + // OrganizationID: ID of the Organization to filter. + OrganizationID string `json:"-"` + + // Editable: defines whether to filter out editable applications or not. + Editable *bool `json:"-"` + + // ApplicationIDs: filter by list of IDs. + ApplicationIDs []string `json:"-"` + + // Tag: filter by tags containing a given string. + Tag *string `json:"-"` +} + +// ListApplicationsResponse: list applications response. +type ListApplicationsResponse struct { + // Applications: list of applications. + Applications []*Application `json:"applications"` + + // TotalCount: total count of applications. 
+ TotalCount uint32 `json:"total_count"` +} + // UnsafeGetTotalCount should not be used // Internal usage only -func (r *ListQuotaResponse) UnsafeGetTotalCount() uint64 { +func (r *ListApplicationsResponse) UnsafeGetTotalCount() uint32 { return r.TotalCount } // UnsafeAppend should not be used // Internal usage only -func (r *ListQuotaResponse) UnsafeAppend(res interface{}) (uint64, error) { - results, ok := res.(*ListQuotaResponse) +func (r *ListApplicationsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListApplicationsResponse) if !ok { return 0, errors.New("%T type cannot be appended to type %T", res, r) } - r.Quota = append(r.Quota, results.Quota...) - r.TotalCount += uint64(len(results.Quota)) - return uint64(len(results.Quota)), nil + r.Applications = append(r.Applications, results.Applications...) + r.TotalCount += uint32(len(results.Applications)) + return uint32(len(results.Applications)), nil +} + +// ListGroupsRequest: list groups request. +type ListGroupsRequest struct { + // OrderBy: sort order of groups. + // Default value: created_at_asc + OrderBy ListGroupsRequestOrderBy `json:"-"` + + // Page: requested page number. Value must be greater or equal to 1. + Page *int32 `json:"-"` + + // PageSize: number of items per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // OrganizationID: filter by Organization ID. + OrganizationID string `json:"-"` + + // Name: name of group to find. + Name *string `json:"-"` + + // ApplicationIDs: filter by a list of application IDs. + ApplicationIDs []string `json:"-"` + + // UserIDs: filter by a list of user IDs. + UserIDs []string `json:"-"` + + // GroupIDs: filter by a list of group IDs. + GroupIDs []string `json:"-"` + + // Tag: filter by tags containing a given string. + Tag *string `json:"-"` +} + +// ListGroupsResponse: list groups response. +type ListGroupsResponse struct { + // Groups: list of groups. + Groups []*Group `json:"groups"` + + // TotalCount: total count of groups. + TotalCount uint32 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListGroupsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListGroupsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListGroupsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Groups = append(r.Groups, results.Groups...) + r.TotalCount += uint32(len(results.Groups)) + return uint32(len(results.Groups)), nil +} + +// ListJWTsRequest: list jw ts request. +type ListJWTsRequest struct { + // OrderBy: criteria for sorting results. + // Default value: created_at_asc + OrderBy ListJWTsRequestOrderBy `json:"-"` + + // AudienceID: ID of the user to search. + AudienceID *string `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // Page: page number. Value must be greater to 1. + Page *int32 `json:"-"` + + // Expired: filter out expired JWTs or not. + Expired *bool `json:"-"` +} + +// ListJWTsResponse: list jw ts response. 
+type ListJWTsResponse struct { + Jwts []*JWT `json:"jwts"` + + TotalCount uint64 `json:"total_count"` } // UnsafeGetTotalCount should not be used @@ -2707,3 +1455,1711 @@ func (r *ListJWTsResponse) UnsafeAppend(res interface{}) (uint64, error) { r.TotalCount += uint64(len(results.Jwts)) return uint64(len(results.Jwts)), nil } + +// ListLogsRequest: list logs request. +type ListLogsRequest struct { + // OrderBy: criteria for sorting results. + // Default value: created_at_asc + OrderBy ListLogsRequestOrderBy `json:"-"` + + // OrganizationID: filter by Organization ID. + OrganizationID string `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // Page: page number. Value must be greater to 1. + Page *int32 `json:"-"` + + // CreatedAfter: defined whether or not to filter out logs created after this timestamp. + CreatedAfter *time.Time `json:"-"` + + // CreatedBefore: defined whether or not to filter out logs created before this timestamp. + CreatedBefore *time.Time `json:"-"` + + // Action: defined whether or not to filter out by a specific action. + // Default value: unknown_action + Action LogAction `json:"-"` + + // ResourceType: defined whether or not to filter out by a specific type of resource. + // Default value: unknown_resource_type + ResourceType LogResourceType `json:"-"` + + // Search: defined whether or not to filter out log by bearer ID or resource ID. + Search *string `json:"-"` +} + +// ListLogsResponse: list logs response. +type ListLogsResponse struct { + // Logs: list of logs. + Logs []*Log `json:"logs"` + + // TotalCount: total count of logs. + TotalCount uint64 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListLogsResponse) UnsafeGetTotalCount() uint64 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListLogsResponse) UnsafeAppend(res interface{}) (uint64, error) { + results, ok := res.(*ListLogsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Logs = append(r.Logs, results.Logs...) + r.TotalCount += uint64(len(results.Logs)) + return uint64(len(results.Logs)), nil +} + +// ListPermissionSetsRequest: list permission sets request. +type ListPermissionSetsRequest struct { + // OrderBy: criteria for sorting results. + // Default value: name_asc + OrderBy ListPermissionSetsRequestOrderBy `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // Page: page number. Value must be greater than 1. + Page *int32 `json:"-"` + + // OrganizationID: filter by Organization ID. + OrganizationID string `json:"-"` +} + +// ListPermissionSetsResponse: list permission sets response. +type ListPermissionSetsResponse struct { + // PermissionSets: list of permission sets. + PermissionSets []*PermissionSet `json:"permission_sets"` + + // TotalCount: total count of permission sets. 
+ TotalCount uint32 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListPermissionSetsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListPermissionSetsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListPermissionSetsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.PermissionSets = append(r.PermissionSets, results.PermissionSets...) + r.TotalCount += uint32(len(results.PermissionSets)) + return uint32(len(results.PermissionSets)), nil +} + +// ListPoliciesRequest: list policies request. +type ListPoliciesRequest struct { + // OrderBy: criteria for sorting results. + // Default value: policy_name_asc + OrderBy ListPoliciesRequestOrderBy `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // Page: page number. Value must be greater than 1. + Page *int32 `json:"-"` + + // OrganizationID: ID of the Organization to filter. + OrganizationID string `json:"-"` + + // Editable: defines whether or not filter out editable policies. + Editable *bool `json:"-"` + + // UserIDs: defines whether or not to filter by list of user IDs. + UserIDs []string `json:"-"` + + // GroupIDs: defines whether or not to filter by list of group IDs. + GroupIDs []string `json:"-"` + + // ApplicationIDs: filter by a list of application IDs. + ApplicationIDs []string `json:"-"` + + // NoPrincipal: defines whether or not the policy is attributed to a principal. + NoPrincipal *bool `json:"-"` + + // PolicyName: name of the policy to fetch. + PolicyName *string `json:"-"` + + // Tag: filter by tags containing a given string. + Tag *string `json:"-"` +} + +// ListPoliciesResponse: list policies response. +type ListPoliciesResponse struct { + // Policies: list of policies. + Policies []*Policy `json:"policies"` + + // TotalCount: total count of policies. + TotalCount uint32 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListPoliciesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListPoliciesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListPoliciesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Policies = append(r.Policies, results.Policies...) + r.TotalCount += uint32(len(results.Policies)) + return uint32(len(results.Policies)), nil +} + +// ListQuotaRequest: list quota request. +type ListQuotaRequest struct { + // OrderBy: criteria for sorting results. + // Default value: name_asc + OrderBy ListQuotaRequestOrderBy `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // Page: page number. Value must be greater than 1. + Page *int32 `json:"-"` + + // OrganizationID: filter by Organization ID. + OrganizationID string `json:"-"` +} + +// ListQuotaResponse: list quota response. +type ListQuotaResponse struct { + // Quota: list of quota. + Quota []*Quotum `json:"quota"` + + // TotalCount: total count of quota. 
+ TotalCount uint64 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListQuotaResponse) UnsafeGetTotalCount() uint64 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListQuotaResponse) UnsafeAppend(res interface{}) (uint64, error) { + results, ok := res.(*ListQuotaResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Quota = append(r.Quota, results.Quota...) + r.TotalCount += uint64(len(results.Quota)) + return uint64(len(results.Quota)), nil +} + +// ListRulesRequest: list rules request. +type ListRulesRequest struct { + // PolicyID: id of policy to search. + PolicyID string `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // Page: page number. Value must be greater than 1. + Page *int32 `json:"-"` +} + +// ListRulesResponse: list rules response. +type ListRulesResponse struct { + // Rules: rules of the policy. + Rules []*Rule `json:"rules"` + + // TotalCount: total count of rules. + TotalCount uint32 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListRulesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListRulesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListRulesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Rules = append(r.Rules, results.Rules...) + r.TotalCount += uint32(len(results.Rules)) + return uint32(len(results.Rules)), nil +} + +// ListSSHKeysRequest: list ssh keys request. +type ListSSHKeysRequest struct { + // OrderBy: sort order of the SSH keys. + // Default value: created_at_asc + OrderBy ListSSHKeysRequestOrderBy `json:"-"` + + // Page: requested page number. Value must be greater or equal to 1. + Page *int32 `json:"-"` + + // PageSize: number of items per page. Value must be between 1 and 100. + PageSize *uint32 `json:"-"` + + // OrganizationID: filter by Organization ID. + OrganizationID *string `json:"-"` + + // Name: name of group to find. + Name *string `json:"-"` + + // ProjectID: filter by Project ID. + ProjectID *string `json:"-"` + + // Disabled: defines whether to include disabled SSH keys or not. + Disabled *bool `json:"-"` +} + +// ListSSHKeysResponse: list ssh keys response. +type ListSSHKeysResponse struct { + // SSHKeys: list of SSH keys. + SSHKeys []*SSHKey `json:"ssh_keys"` + + // TotalCount: total count of SSH keys. + TotalCount uint32 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListSSHKeysResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListSSHKeysResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListSSHKeysResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.SSHKeys = append(r.SSHKeys, results.SSHKeys...) + r.TotalCount += uint32(len(results.SSHKeys)) + return uint32(len(results.SSHKeys)), nil +} + +// ListUsersRequest: list users request. +type ListUsersRequest struct { + // OrderBy: criteria for sorting results. + // Default value: created_at_asc + OrderBy ListUsersRequestOrderBy `json:"-"` + + // PageSize: number of results per page. Value must be between 1 and 100. 
+ PageSize *uint32 `json:"-"` + + // Page: page number. Value must be greater or equal to 1. + Page *int32 `json:"-"` + + // OrganizationID: ID of the Organization to filter. + OrganizationID *string `json:"-"` + + // UserIDs: filter by list of IDs. + UserIDs []string `json:"-"` + + // Mfa: filter by MFA status. + Mfa *bool `json:"-"` + + // Tag: filter by tags containing a given string. + Tag *string `json:"-"` +} + +// ListUsersResponse: list users response. +type ListUsersResponse struct { + // Users: list of users. + Users []*User `json:"users"` + + // TotalCount: total count of users. + TotalCount uint32 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListUsersResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListUsersResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListUsersResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Users = append(r.Users, results.Users...) + r.TotalCount += uint32(len(results.Users)) + return uint32(len(results.Users)), nil +} + +// RemoveGroupMemberRequest: remove group member request. +type RemoveGroupMemberRequest struct { + // GroupID: ID of the group. + GroupID string `json:"-"` + + // UserID: ID of the user to remove. + // Precisely one of UserID, ApplicationID must be set. + UserID *string `json:"user_id,omitempty"` + + // ApplicationID: ID of the application to remove. + // Precisely one of UserID, ApplicationID must be set. + ApplicationID *string `json:"application_id,omitempty"` +} + +// SetGroupMembersRequest: set group members request. +type SetGroupMembersRequest struct { + GroupID string `json:"-"` + + UserIDs []string `json:"user_ids"` + + ApplicationIDs []string `json:"application_ids"` +} + +// SetRulesRequest: set rules request. +type SetRulesRequest struct { + // PolicyID: id of policy to update. + PolicyID string `json:"policy_id"` + + // Rules: rules of the policy to set. + Rules []*RuleSpecs `json:"rules"` +} + +// SetRulesResponse: set rules response. +type SetRulesResponse struct { + // Rules: rules of the policy. + Rules []*Rule `json:"rules"` +} + +// UpdateAPIKeyRequest: update api key request. +type UpdateAPIKeyRequest struct { + // AccessKey: access key to update. + AccessKey string `json:"-"` + + // DefaultProjectID: new default Project ID to set. + DefaultProjectID *string `json:"default_project_id,omitempty"` + + // Description: new description to update. + Description *string `json:"description,omitempty"` +} + +// UpdateApplicationRequest: update application request. +type UpdateApplicationRequest struct { + // ApplicationID: ID of the application to update. + ApplicationID string `json:"-"` + + // Name: new name for the application (max length is 64 chars). + Name *string `json:"name,omitempty"` + + // Description: new description for the application (max length is 200 chars). + Description *string `json:"description,omitempty"` + + // Tags: new tags for the application (maximum of 10 tags). + Tags *[]string `json:"tags,omitempty"` +} + +// UpdateGroupRequest: update group request. +type UpdateGroupRequest struct { + // GroupID: ID of the group to update. + GroupID string `json:"-"` + + // Name: new name for the group (max length is 64 chars). MUST be unique inside an Organization. + Name *string `json:"name,omitempty"` + + // Description: new description for the group (max length is 200 chars). 
+ Description *string `json:"description,omitempty"` + + // Tags: new tags for the group (maximum of 10 tags). + Tags *[]string `json:"tags,omitempty"` +} + +// UpdatePolicyRequest: update policy request. +type UpdatePolicyRequest struct { + // PolicyID: id of policy to update. + PolicyID string `json:"-"` + + // Name: new name for the policy (max length is 64 characters). + Name *string `json:"name,omitempty"` + + // Description: new description of policy (max length is 200 characters). + Description *string `json:"description,omitempty"` + + // Tags: new tags for the policy (maximum of 10 tags). + Tags *[]string `json:"tags,omitempty"` + + // UserID: new ID of user attributed to the policy. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. + UserID *string `json:"user_id,omitempty"` + + // GroupID: new ID of group attributed to the policy. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. + GroupID *string `json:"group_id,omitempty"` + + // ApplicationID: new ID of application attributed to the policy. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. + ApplicationID *string `json:"application_id,omitempty"` + + // NoPrincipal: defines whether or not the policy is attributed to a principal. + // Precisely one of UserID, GroupID, ApplicationID, NoPrincipal must be set. + NoPrincipal *bool `json:"no_principal,omitempty"` +} + +// UpdateSSHKeyRequest: update ssh key request. +type UpdateSSHKeyRequest struct { + SSHKeyID string `json:"-"` + + // Name: name of the SSH key. Max length is 1000. + Name *string `json:"name,omitempty"` + + // Disabled: enable or disable the SSH key. + Disabled *bool `json:"disabled,omitempty"` +} + +// IAM API. +type API struct { + client *scw.Client +} + +// NewAPI returns a API object from a Scaleway client. +func NewAPI(client *scw.Client) *API { + return &API{ + client: client, + } +} + +// ListSSHKeys: List SSH keys. By default, the SSH keys listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You can define additional parameters for your query such as `organization_id`, `name`, `project_id` and `disabled`. +func (s *API) ListSSHKeys(req *ListSSHKeysRequest, opts ...scw.RequestOption) (*ListSSHKeysResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "name", req.Name) + parameter.AddToQuery(query, "project_id", req.ProjectID) + parameter.AddToQuery(query, "disabled", req.Disabled) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/ssh-keys", + Query: query, + } + + var resp ListSSHKeysResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// CreateSSHKey: Add a new SSH key to a Scaleway Project. You must specify the `name`, `public_key` and `project_id`. 
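An illustrative usage sketch, not part of the generated SDK or this vendor update, showing how the NewAPI constructor above is typically wired to a Scaleway client. It assumes the conventional import paths for this SDK and uses placeholder credentials and IDs.

package main

import (
	"fmt"

	iam "github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// The access key, secret key and Organization ID below are placeholders.
	client, err := scw.NewClient(
		scw.WithAuth("SCWXXXXXXXXXXXXXXXXX", "11111111-1111-1111-1111-111111111111"),
		scw.WithDefaultOrganizationID("11111111-1111-1111-1111-111111111111"),
	)
	if err != nil {
		panic(err)
	}

	// NewAPI is the constructor defined above.
	api := iam.NewAPI(client)

	// List SSH keys for the default Organization, following pagination.
	resp, err := api.ListSSHKeys(&iam.ListSSHKeysRequest{}, scw.WithAllPages())
	if err != nil {
		panic(err)
	}
	fmt.Println("ssh keys:", resp.TotalCount)
}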
+func (s *API) CreateSSHKey(req *CreateSSHKeyRequest, opts ...scw.RequestOption) (*SSHKey, error) { + var err error + + if req.ProjectID == "" { + defaultProjectID, _ := s.client.GetDefaultProjectID() + req.ProjectID = defaultProjectID + } + + if req.Name == "" { + req.Name = namegenerator.GetRandomName("key") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/ssh-keys", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp SSHKey + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetSSHKey: Retrieve information about a given SSH key, specified by the `ssh_key_id` parameter. The SSH key's full details, including `id`, `name`, `public_key`, and `project_id` are returned in the response. +func (s *API) GetSSHKey(req *GetSSHKeyRequest, opts ...scw.RequestOption) (*SSHKey, error) { + var err error + + if fmt.Sprint(req.SSHKeyID) == "" { + return nil, errors.New("field SSHKeyID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/ssh-keys/" + fmt.Sprint(req.SSHKeyID) + "", + } + + var resp SSHKey + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// UpdateSSHKey: Update the parameters of an SSH key, including `name` and `disable`. +func (s *API) UpdateSSHKey(req *UpdateSSHKeyRequest, opts ...scw.RequestOption) (*SSHKey, error) { + var err error + + if fmt.Sprint(req.SSHKeyID) == "" { + return nil, errors.New("field SSHKeyID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/iam/v1alpha1/ssh-keys/" + fmt.Sprint(req.SSHKeyID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp SSHKey + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DeleteSSHKey: Delete a given SSH key, specified by the `ssh_key_id`. Deleting an SSH is permanent, and cannot be undone. Note that you might need to update any configurations that used the SSH key. +func (s *API) DeleteSSHKey(req *DeleteSSHKeyRequest, opts ...scw.RequestOption) error { + var err error + + if fmt.Sprint(req.SSHKeyID) == "" { + return errors.New("field SSHKeyID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/iam/v1alpha1/ssh-keys/" + fmt.Sprint(req.SSHKeyID) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// ListUsers: List the users of an Organization. By default, the users listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `organization_id` in the query path of your request. You can also define additional parameters for your query such as `user_ids`. 
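The SSH key helpers here (CreateSSHKey, GetSSHKey, UpdateSSHKey, DeleteSSHKey) all follow the same request/response pattern. A minimal sketch, not part of the SDK, of creating a key and then disabling it; it assumes an *iam.API and imports as in the earlier sketch, the public key and Project ID are placeholders, and key.ID relies on the SSHKey type defined earlier in this file.

// createAndDisableKey is an illustrative helper, not part of the SDK.
func createAndDisableKey(api *iam.API) error {
	// Create a key in a placeholder Project.
	key, err := api.CreateSSHKey(&iam.CreateSSHKeyRequest{
		Name:      "review-example",
		PublicKey: "ssh-ed25519 AAAA... user@host", // placeholder public key
		ProjectID: "11111111-1111-1111-1111-111111111111",
	})
	if err != nil {
		return err
	}

	// Disable it again via the PATCH endpoint wrapped by UpdateSSHKey.
	_, err = api.UpdateSSHKey(&iam.UpdateSSHKeyRequest{
		SSHKeyID: key.ID,
		Disabled: scw.BoolPtr(true),
	})
	return err
}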
+func (s *API) ListUsers(req *ListUsersRequest, opts ...scw.RequestOption) (*ListUsersResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "user_ids", req.UserIDs) + parameter.AddToQuery(query, "mfa", req.Mfa) + parameter.AddToQuery(query, "tag", req.Tag) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/users", + Query: query, + } + + var resp ListUsersResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetUser: Retrieve information about a user, specified by the `user_id` parameter. The user's full details, including `id`, `email`, `organization_id`, `status` and `mfa` are returned in the response. +func (s *API) GetUser(req *GetUserRequest, opts ...scw.RequestOption) (*User, error) { + var err error + + if fmt.Sprint(req.UserID) == "" { + return nil, errors.New("field UserID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/users/" + fmt.Sprint(req.UserID) + "", + } + + var resp User + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DeleteUser: Remove a user from an Organization in which they are a guest. You must define the `user_id` in your request. Note that removing a user from an Organization automatically deletes their API keys, and any policies directly attached to them become orphaned. +func (s *API) DeleteUser(req *DeleteUserRequest, opts ...scw.RequestOption) error { + var err error + + if fmt.Sprint(req.UserID) == "" { + return errors.New("field UserID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/iam/v1alpha1/users/" + fmt.Sprint(req.UserID) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// CreateUser: Create a new user. You must define the `organization_id` and the `email` in your request. +func (s *API) CreateUser(req *CreateUserRequest, opts ...scw.RequestOption) (*User, error) { + var err error + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/users", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp User + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListApplications: List the applications of an Organization. By default, the applications listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `organization_id` in the query path of your request. You can also define additional parameters for your query such as `application_ids`. 
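The user endpoints follow the same shape. A brief sketch, not part of the SDK, of creating a user by email and then listing members with MFA enabled; it assumes the same imports and *iam.API as above, the email is a placeholder, and user.ID comes from the User type defined earlier in this file.

// inviteAndListUsers is an illustrative helper, not part of the SDK.
func inviteAndListUsers(api *iam.API) error {
	// CreateUser falls back to the client's default Organization when
	// OrganizationID is left empty, as shown in the function above.
	user, err := api.CreateUser(&iam.CreateUserRequest{
		Email: "jane.doe@example.com", // placeholder
		Tags:  []string{"team:platform"},
	})
	if err != nil {
		return err
	}
	fmt.Println("created user", user.ID)

	// List only users that have MFA enabled.
	_, err = api.ListUsers(&iam.ListUsersRequest{
		Mfa: scw.BoolPtr(true),
	}, scw.WithAllPages())
	return err
}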
+func (s *API) ListApplications(req *ListApplicationsRequest, opts ...scw.RequestOption) (*ListApplicationsResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "name", req.Name) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "editable", req.Editable) + parameter.AddToQuery(query, "application_ids", req.ApplicationIDs) + parameter.AddToQuery(query, "tag", req.Tag) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/applications", + Query: query, + } + + var resp ListApplicationsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// CreateApplication: Create a new application. You must define the `name` parameter in the request. +func (s *API) CreateApplication(req *CreateApplicationRequest, opts ...scw.RequestOption) (*Application, error) { + var err error + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + if req.Name == "" { + req.Name = namegenerator.GetRandomName("app") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/applications", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Application + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetApplication: Retrieve information about an application, specified by the `application_id` parameter. The application's full details, including `id`, `email`, `organization_id`, `status` and `two_factor_enabled` are returned in the response. +func (s *API) GetApplication(req *GetApplicationRequest, opts ...scw.RequestOption) (*Application, error) { + var err error + + if fmt.Sprint(req.ApplicationID) == "" { + return nil, errors.New("field ApplicationID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/applications/" + fmt.Sprint(req.ApplicationID) + "", + } + + var resp Application + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// UpdateApplication: Update the parameters of an application, including `name` and `description`. +func (s *API) UpdateApplication(req *UpdateApplicationRequest, opts ...scw.RequestOption) (*Application, error) { + var err error + + if fmt.Sprint(req.ApplicationID) == "" { + return nil, errors.New("field ApplicationID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/iam/v1alpha1/applications/" + fmt.Sprint(req.ApplicationID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Application + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DeleteApplication: Delete an application. Note that this action is irreversible and will automatically delete the application's API keys. 
Policies attached to users and applications via this group will no longer apply. +func (s *API) DeleteApplication(req *DeleteApplicationRequest, opts ...scw.RequestOption) error { + var err error + + if fmt.Sprint(req.ApplicationID) == "" { + return errors.New("field ApplicationID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/iam/v1alpha1/applications/" + fmt.Sprint(req.ApplicationID) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// ListGroups: List groups. By default, the groups listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You can define additional parameters to filter your query. Use `user_ids` or `application_ids` to list all groups certain users or applications belong to. +func (s *API) ListGroups(req *ListGroupsRequest, opts ...scw.RequestOption) (*ListGroupsResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "name", req.Name) + parameter.AddToQuery(query, "application_ids", req.ApplicationIDs) + parameter.AddToQuery(query, "user_ids", req.UserIDs) + parameter.AddToQuery(query, "group_ids", req.GroupIDs) + parameter.AddToQuery(query, "tag", req.Tag) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/groups", + Query: query, + } + + var resp ListGroupsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// CreateGroup: Create a new group. You must define the `name` and `organization_id` parameters in the request. +func (s *API) CreateGroup(req *CreateGroupRequest, opts ...scw.RequestOption) (*Group, error) { + var err error + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + if req.Name == "" { + req.Name = namegenerator.GetRandomName("grp") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/groups", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Group + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetGroup: Retrive information about a given group, specified by the `group_id` parameter. The group's full details, including `user_ids` and `application_ids` are returned in the response. +func (s *API) GetGroup(req *GetGroupRequest, opts ...scw.RequestOption) (*Group, error) { + var err error + + if fmt.Sprint(req.GroupID) == "" { + return nil, errors.New("field GroupID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "", + } + + var resp Group + + err = s.client.Do(scwReq, &resp, opts...) 
+ if err != nil { + return nil, err + } + return &resp, nil +} + +// UpdateGroup: Update the parameters of group, including `name` and `description`. +func (s *API) UpdateGroup(req *UpdateGroupRequest, opts ...scw.RequestOption) (*Group, error) { + var err error + + if fmt.Sprint(req.GroupID) == "" { + return nil, errors.New("field GroupID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Group + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// SetGroupMembers: Overwrite users and applications configuration in a group. Any information that you add using this command will overwrite the previous configuration. +func (s *API) SetGroupMembers(req *SetGroupMembersRequest, opts ...scw.RequestOption) (*Group, error) { + var err error + + if fmt.Sprint(req.GroupID) == "" { + return nil, errors.New("field GroupID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PUT", + Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "/members", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Group + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// AddGroupMember: Add a user or an application to a group. You can specify a `user_id` and and `application_id` in the body of your request. Note that you can only add one of each per request. +func (s *API) AddGroupMember(req *AddGroupMemberRequest, opts ...scw.RequestOption) (*Group, error) { + var err error + + if fmt.Sprint(req.GroupID) == "" { + return nil, errors.New("field GroupID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "/add-member", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Group + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// AddGroupMembers: Add multiple users and applications to a group in a single call. You can specify an array of `user_id`s and `application_id`s. Note that any existing users and applications in the group will remain. To add new users/applications and delete pre-existing ones, use the [Overwrite users and applications of a group](#path-groups-overwrite-users-and-applications-of-a-group) method. +func (s *API) AddGroupMembers(req *AddGroupMembersRequest, opts ...scw.RequestOption) (*Group, error) { + var err error + + if fmt.Sprint(req.GroupID) == "" { + return nil, errors.New("field GroupID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "/add-members", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Group + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// RemoveGroupMember: Remove a user or an application from a group. You can specify a `user_id` and and `application_id` in the body of your request. Note that you can only remove one of each per request. Removing a user from a group means that any permissions given to them via the group (i.e. from an attached policy) will no longer apply. 
Be sure you want to remove these permissions from the user before proceeding. +func (s *API) RemoveGroupMember(req *RemoveGroupMemberRequest, opts ...scw.RequestOption) (*Group, error) { + var err error + + if fmt.Sprint(req.GroupID) == "" { + return nil, errors.New("field GroupID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "/remove-member", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Group + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DeleteGroup: Delete a group. Note that this action is irreversible and could delete permissions for group members. Policies attached to users and applications via this group will no longer apply. +func (s *API) DeleteGroup(req *DeleteGroupRequest, opts ...scw.RequestOption) error { + var err error + + if fmt.Sprint(req.GroupID) == "" { + return errors.New("field GroupID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/iam/v1alpha1/groups/" + fmt.Sprint(req.GroupID) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// ListPolicies: List the policies of an Organization. By default, the policies listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `organization_id` in the query path of your request. You can also define additional parameters to filter your query, such as `user_ids`, `groups_ids`, `application_ids`, and `policy_name`. +func (s *API) ListPolicies(req *ListPoliciesRequest, opts ...scw.RequestOption) (*ListPoliciesResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "editable", req.Editable) + parameter.AddToQuery(query, "user_ids", req.UserIDs) + parameter.AddToQuery(query, "group_ids", req.GroupIDs) + parameter.AddToQuery(query, "application_ids", req.ApplicationIDs) + parameter.AddToQuery(query, "no_principal", req.NoPrincipal) + parameter.AddToQuery(query, "policy_name", req.PolicyName) + parameter.AddToQuery(query, "tag", req.Tag) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/policies", + Query: query, + } + + var resp ListPoliciesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// CreatePolicy: Create a new application. You must define the `name` parameter in the request. You can specify parameters such as `user_id`, `groups_id`, `application_id`, `no_principal`, `rules` and its child attributes. 
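CreatePolicyRequest, defined earlier in this file, requires precisely one of UserID, GroupID, ApplicationID or NoPrincipal, and CreatePolicy falls back to the client's default Organization ID. A minimal sketch, not part of the SDK, of attaching a policy to an application; the application ID is supplied by the caller and the rule contents are elided because RuleSpecs is defined outside this hunk.

// grantToApplication is an illustrative helper, not part of the SDK.
func grantToApplication(api *iam.API, appID string) (*iam.Policy, error) {
	return api.CreatePolicy(&iam.CreatePolicyRequest{
		Name:        "readers",
		Description: "example read-only policy",
		// Precisely one principal field may be set; here, the application.
		ApplicationID: scw.StringPtr(appID),
		// Rules left empty here; populate []*iam.RuleSpecs per the type
		// definition elsewhere in this package, or call SetRules afterwards.
		Rules: []*iam.RuleSpecs{},
	})
}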
+func (s *API) CreatePolicy(req *CreatePolicyRequest, opts ...scw.RequestOption) (*Policy, error) { + var err error + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + if req.Name == "" { + req.Name = namegenerator.GetRandomName("pol") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/policies", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Policy + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetPolicy: Retrieve information about a policy, speficified by the `policy_id` parameter. The policy's full details, including `id`, `name`, `organization_id`, `nb_rules` and `nb_scopes`, `nb_permission_sets` are returned in the response. +func (s *API) GetPolicy(req *GetPolicyRequest, opts ...scw.RequestOption) (*Policy, error) { + var err error + + if fmt.Sprint(req.PolicyID) == "" { + return nil, errors.New("field PolicyID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/policies/" + fmt.Sprint(req.PolicyID) + "", + } + + var resp Policy + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// UpdatePolicy: Update the parameters of a policy, including `name`, `description`, `user_id`, `group_id`, `application_id` and `no_principal`. +func (s *API) UpdatePolicy(req *UpdatePolicyRequest, opts ...scw.RequestOption) (*Policy, error) { + var err error + + if fmt.Sprint(req.PolicyID) == "" { + return nil, errors.New("field PolicyID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/iam/v1alpha1/policies/" + fmt.Sprint(req.PolicyID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Policy + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DeletePolicy: Delete a policy. You must define specify the `policy_id` parameter in your request. Note that when deleting a policy, all permissions it gives to its principal (user, group or application) will be revoked. +func (s *API) DeletePolicy(req *DeletePolicyRequest, opts ...scw.RequestOption) error { + var err error + + if fmt.Sprint(req.PolicyID) == "" { + return errors.New("field PolicyID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/iam/v1alpha1/policies/" + fmt.Sprint(req.PolicyID) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// ClonePolicy: Clone a policy. You must define specify the `policy_id` parameter in your request. +func (s *API) ClonePolicy(req *ClonePolicyRequest, opts ...scw.RequestOption) (*Policy, error) { + var err error + + if fmt.Sprint(req.PolicyID) == "" { + return nil, errors.New("field PolicyID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/policies/" + fmt.Sprint(req.PolicyID) + "/clone", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp Policy + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// SetRules: Overwrite the rules of a given policy. Any information that you add using this command will overwrite the previous configuration. 
If you include some of the rules you already had in your previous configuration in your new one, but you change their order, the new order of display will apply. While policy rules are ordered, they have no impact on the access logic of IAM because rules are allow-only. +func (s *API) SetRules(req *SetRulesRequest, opts ...scw.RequestOption) (*SetRulesResponse, error) { + var err error + + scwReq := &scw.ScalewayRequest{ + Method: "PUT", + Path: "/iam/v1alpha1/rules", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp SetRulesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListRules: List the rules of a given policy. By default, the rules listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `policy_id` in the query path of your request. +func (s *API) ListRules(req *ListRulesRequest, opts ...scw.RequestOption) (*ListRulesResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "policy_id", req.PolicyID) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/rules", + Query: query, + } + + var resp ListRulesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListPermissionSets: List permission sets available for given Organization. You must define the `organization_id` in the query path of your request. +func (s *API) ListPermissionSets(req *ListPermissionSetsRequest, opts ...scw.RequestOption) (*ListPermissionSetsResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/permission-sets", + Query: query, + } + + var resp ListPermissionSetsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListAPIKeys: List API keys. By default, the API keys listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You can define additional parameters for your query such as `editable`, `expired`, `access_key` and `bearer_id`. 
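Before the ListAPIKeys implementation that follows, a short sketch of a filtered listing. All filters are optional; scw.WithAllPages and the TotalCount response field are assumed to behave as they do for other Scaleway list endpoints and are not shown in this hunk.

package main

import (
	"fmt"

	iam "github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY")) // placeholder credentials
	if err != nil {
		panic(err)
	}
	api := iam.NewAPI(client) // assumed generated constructor

	// An empty request lists every key visible to the caller; editable,
	// expired, bearer_id and the other filters from the query above can be
	// set individually.
	resp, err := api.ListAPIKeys(&iam.ListAPIKeysRequest{}, scw.WithAllPages())
	if err != nil {
		panic(err)
	}
	fmt.Printf("API keys in scope: %d\n", resp.TotalCount) // TotalCount assumed from the usual list-response shape
}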
+func (s *API) ListAPIKeys(req *ListAPIKeysRequest, opts ...scw.RequestOption) (*ListAPIKeysResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "editable", req.Editable) + parameter.AddToQuery(query, "expired", req.Expired) + parameter.AddToQuery(query, "access_key", req.AccessKey) + parameter.AddToQuery(query, "description", req.Description) + parameter.AddToQuery(query, "bearer_id", req.BearerID) + parameter.AddToQuery(query, "bearer_type", req.BearerType) + parameter.AddToQuery(query, "application_id", req.ApplicationID) + parameter.AddToQuery(query, "user_id", req.UserID) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/api-keys", + Query: query, + } + + var resp ListAPIKeysResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// CreateAPIKey: Create an API key. You must specify the `application_id` or the `user_id` and the description. You can also specify the `default_project_id` which is the Project ID of your preferred Project, to use with Object Storage. The `access_key` and `secret_key` values are returned in the response. Note that he secret key is only showed once. Make sure that you copy and store both keys somewhere safe. +func (s *API) CreateAPIKey(req *CreateAPIKeyRequest, opts ...scw.RequestOption) (*APIKey, error) { + var err error + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/iam/v1alpha1/api-keys", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp APIKey + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetAPIKey: Retrive information about an API key, specified by the `access_key` parameter. The API key's details, including either the `user_id` or `application_id` of its bearer are returned in the response. Note that the string value for the `secret_key` is nullable, and therefore is not displayed in the response. The `secret_key` value is only displayed upon API key creation. +func (s *API) GetAPIKey(req *GetAPIKeyRequest, opts ...scw.RequestOption) (*APIKey, error) { + var err error + + if fmt.Sprint(req.AccessKey) == "" { + return nil, errors.New("field AccessKey cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/api-keys/" + fmt.Sprint(req.AccessKey) + "", + } + + var resp APIKey + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// UpdateAPIKey: Update the parameters of an API key, including `default_project_id` and `description`. +func (s *API) UpdateAPIKey(req *UpdateAPIKeyRequest, opts ...scw.RequestOption) (*APIKey, error) { + var err error + + if fmt.Sprint(req.AccessKey) == "" { + return nil, errors.New("field AccessKey cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/iam/v1alpha1/api-keys/" + fmt.Sprint(req.AccessKey) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp APIKey + + err = s.client.Do(scwReq, &resp, opts...) 
+ if err != nil { + return nil, err + } + return &resp, nil +} + +// DeleteAPIKey: Delete an API key. Note that this action is irreversible and cannot be undone. Make sure you update any configurations using the API keys you delete. +func (s *API) DeleteAPIKey(req *DeleteAPIKeyRequest, opts ...scw.RequestOption) error { + var err error + + if fmt.Sprint(req.AccessKey) == "" { + return errors.New("field AccessKey cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/iam/v1alpha1/api-keys/" + fmt.Sprint(req.AccessKey) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// ListQuota: List all product and features quota for an Organization, with their associated limits. By default, the quota listed are ordered by creation date in ascending order. This can be modified via the `order_by` field. You must define the `organization_id` in the query path of your request. +func (s *API) ListQuota(req *ListQuotaRequest, opts ...scw.RequestOption) (*ListQuotaResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/quota", + Query: query, + } + + var resp ListQuotaResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetQuotum: Retrieve information about a resource quota, specified by the `quotum_name` parameter. The quota's `limit`, or whether it is unlimited, is returned in the response. +func (s *API) GetQuotum(req *GetQuotumRequest, opts ...scw.RequestOption) (*Quotum, error) { + var err error + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + query := url.Values{} + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + + if fmt.Sprint(req.QuotumName) == "" { + return nil, errors.New("field QuotumName cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/quota/" + fmt.Sprint(req.QuotumName) + "", + Query: query, + } + + var resp Quotum + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListJWTs: List JWTs. 
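Alongside the JWT helpers that follow, the quota lookup above takes only a quotum name plus an Organization ID defaulted from the client. A brief sketch of both, with placeholder identifiers; field names outside this hunk are not assumed.

package main

import (
	"fmt"

	iam "github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"),
		scw.WithDefaultOrganizationID("11111111-1111-1111-1111-111111111111"),
	)
	if err != nil {
		panic(err)
	}
	api := iam.NewAPI(client) // assumed generated constructor

	// Quota lookup: QuotumName is the only field the request validates.
	quotum, err := api.GetQuotum(&iam.GetQuotumRequest{QuotumName: "example_quotum"}) // placeholder name
	if err != nil {
		panic(err)
	}
	fmt.Printf("quota: %+v\n", quotum)

	// JWT lookup and removal are keyed on the token's jti.
	jti := "11111111-1111-1111-1111-111111111111" // placeholder
	if jwt, err := api.GetJWT(&iam.GetJWTRequest{Jti: jti}); err == nil {
		fmt.Printf("JWT: %+v\n", jwt)
	}
	if err := api.DeleteJWT(&iam.DeleteJWTRequest{Jti: jti}); err != nil {
		fmt.Println("delete failed:", err)
	}
}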
+func (s *API) ListJWTs(req *ListJWTsRequest, opts ...scw.RequestOption) (*ListJWTsResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "audience_id", req.AudienceID) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "expired", req.Expired) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/jwts", + Query: query, + } + + var resp ListJWTsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetJWT: Get a JWT. +func (s *API) GetJWT(req *GetJWTRequest, opts ...scw.RequestOption) (*JWT, error) { + var err error + + if fmt.Sprint(req.Jti) == "" { + return nil, errors.New("field Jti cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/jwts/" + fmt.Sprint(req.Jti) + "", + } + + var resp JWT + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DeleteJWT: Delete a JWT. +func (s *API) DeleteJWT(req *DeleteJWTRequest, opts ...scw.RequestOption) error { + var err error + + if fmt.Sprint(req.Jti) == "" { + return errors.New("field Jti cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "DELETE", + Path: "/iam/v1alpha1/jwts/" + fmt.Sprint(req.Jti) + "", + } + + err = s.client.Do(scwReq, nil, opts...) + if err != nil { + return err + } + return nil +} + +// ListLogs: List logs available for given Organization. You must define the `organization_id` in the query path of your request. +func (s *API) ListLogs(req *ListLogsRequest, opts ...scw.RequestOption) (*ListLogsResponse, error) { + var err error + + if req.OrganizationID == "" { + defaultOrganizationID, _ := s.client.GetDefaultOrganizationID() + req.OrganizationID = defaultOrganizationID + } + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "created_after", req.CreatedAfter) + parameter.AddToQuery(query, "created_before", req.CreatedBefore) + parameter.AddToQuery(query, "action", req.Action) + parameter.AddToQuery(query, "resource_type", req.ResourceType) + parameter.AddToQuery(query, "search", req.Search) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/logs", + Query: query, + } + + var resp ListLogsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetLog: Retrieve information about a log, specified by the `log_id` parameter. The log's full details, including `id`, `ip`, `user_agent`, `action`, `bearer_id`, `resource_type` and `resource_id` are returned in the response. 
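Before the GetLog implementation that follows, a sketch pairing it with ListLogs above; the Organization ID comes from the client default and the log ID is a placeholder. TotalCount is assumed from the usual generated list-response shape.

package main

import (
	"fmt"

	iam "github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"),
		scw.WithDefaultOrganizationID("11111111-1111-1111-1111-111111111111"),
	)
	if err != nil {
		panic(err)
	}
	api := iam.NewAPI(client) // assumed generated constructor

	// With no filters, ListLogs returns the Organization's audit trail;
	// created_after/created_before, action and resource_type can narrow it.
	logs, err := api.ListLogs(&iam.ListLogsRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("log entries: %d\n", logs.TotalCount)

	// Fetch one entry by ID (placeholder UUID).
	entry, err := api.GetLog(&iam.GetLogRequest{LogID: "22222222-2222-2222-2222-222222222222"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("log entry: %+v\n", entry)
}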
+func (s *API) GetLog(req *GetLogRequest, opts ...scw.RequestOption) (*Log, error) { + var err error + + if fmt.Sprint(req.LogID) == "" { + return nil, errors.New("field LogID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/iam/v1alpha1/logs/" + fmt.Sprint(req.LogID) + "", + } + + var resp Log + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/image_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/image_utils.go index 108ce095fa..99538fdff1 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/image_utils.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/image_utils.go @@ -1,8 +1,6 @@ package instance import ( - "fmt" - "net/http" "time" "github.com/scaleway/scaleway-sdk-go/internal/async" @@ -56,68 +54,3 @@ func (s *API) WaitForImage(req *WaitForImageRequest, opts ...scw.RequestOption) } return image.(*Image), nil } - -type UpdateImageRequest struct { - Zone scw.Zone `json:"zone"` - ImageID string `json:"id"` - Name *string `json:"name,omitempty"` - Arch Arch `json:"arch,omitempty"` - CreationDate *time.Time `json:"creation_date"` - ModificationDate *time.Time `json:"modification_date"` - ExtraVolumes map[string]*VolumeTemplate `json:"extra_volumes"` - FromServer string `json:"from_server,omitempty"` - Organization string `json:"organization"` - Public bool `json:"public"` - RootVolume *VolumeSummary `json:"root_volume,omitempty"` - State ImageState `json:"state"` - Project string `json:"project"` - Tags *[]string `json:"tags,omitempty"` -} - -type UpdateImageResponse struct { - Image *Image -} - -func (s *API) UpdateImage(req *UpdateImageRequest, opts ...scw.RequestOption) (*UpdateImageResponse, error) { - // This function is the equivalent of the private setImage function that is not usable because the json tags and - // types are not compatible with what the compute API expects - - if req.Project == "" { - defaultProject, _ := s.client.GetDefaultProjectID() - req.Project = defaultProject - } - if req.Organization == "" { - defaultOrganization, _ := s.client.GetDefaultOrganizationID() - req.Organization = defaultOrganization - } - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone - } - if fmt.Sprint(req.Zone) == "" { - return nil, errors.New("field Zone cannot be empty in request") - } - if fmt.Sprint(req.ImageID) == "" { - return nil, errors.New("field ID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ImageID) + "", - Headers: http.Header{}, - } - - err := scwReq.SetBody(req) - if err != nil { - return nil, err - } - - var resp UpdateImageResponse - - err = s.client.Do(scwReq, &resp, opts...) 
- if err != nil { - return nil, err - } - - return &resp, nil -} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_metadata_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_metadata_sdk.go index 5cba7531a5..6bf35d64b1 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_metadata_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_metadata_sdk.go @@ -14,12 +14,15 @@ import ( ) var ( - metadataURL = "http://169.254.42.42" metadataRetryBindPort = 200 ) +const metadataAPIv4 = "http://169.254.42.42" +const metadataAPIv6 = "http://[fd00:42::42]" + // MetadataAPI metadata API type MetadataAPI struct { + MetadataURL *string } // NewMetadataAPI returns a MetadataAPI object from a Scaleway client. @@ -27,9 +30,25 @@ func NewMetadataAPI() *MetadataAPI { return &MetadataAPI{} } +func (meta *MetadataAPI) getMetadataUrl() string { + if meta.MetadataURL != nil { + return *meta.MetadataURL + } + + for _, url := range []string{metadataAPIv4, metadataAPIv6} { + http.DefaultClient.Timeout = 3 * time.Second + resp, err := http.Get(url) + if err == nil && resp.StatusCode == 200 { + meta.MetadataURL = &url + return url + } + } + return metadataAPIv4 +} + // GetMetadata returns the metadata available from the server -func (*MetadataAPI) GetMetadata() (m *Metadata, err error) { - resp, err := http.Get(metadataURL + "/conf?format=json") +func (meta *MetadataAPI) GetMetadata() (m *Metadata, err error) { + resp, err := http.Get(meta.getMetadataUrl() + "/conf?format=json") if err != nil { return nil, errors.Wrap(err, "error getting metadataURL") } @@ -43,6 +62,18 @@ func (*MetadataAPI) GetMetadata() (m *Metadata, err error) { return metadata, nil } +// MetadataIP represents all public IPs attached +type MetadataIP struct { + ID string `json:"id"` + Address string `json:"address"` + Dynamic bool `json:"dynamic"` + Gateway string `json:"gateway"` + Netmask string `json:"netmask"` + Family string `json:"family"` + ProvisioningMode string `json:"provisioning_mode"` + Tags []string `json:"tags"` +} + // Metadata represents the struct return by the metadata API type Metadata struct { ID string `json:"id,omitempty"` @@ -51,13 +82,20 @@ type Metadata struct { Organization string `json:"organization,omitempty"` Project string `json:"project,omitempty"` CommercialType string `json:"commercial_type,omitempty"` - PublicIP struct { - Dynamic bool `json:"dynamic,omitempty"` - ID string `json:"id,omitempty"` - Address string `json:"address,omitempty"` + //PublicIP IPv4 only + PublicIP struct { + ID string `json:"id"` + Address string `json:"address"` + Dynamic bool `json:"dynamic"` + Gateway string `json:"gateway"` + Netmask string `json:"netmask"` + Family string `json:"family"` + ProvisioningMode string `json:"provisioning_mode"` } `json:"public_ip,omitempty"` - PrivateIP string `json:"private_ip,omitempty"` - IPv6 struct { + PublicIpsV4 []MetadataIP `json:"public_ips_v4,omitempty"` + PublicIpsV6 []MetadataIP `json:"public_ips_v6,omitempty"` + PrivateIP string `json:"private_ip,omitempty"` + IPv6 struct { Netmask string `json:"netmask,omitempty"` Gateway string `json:"gateway,omitempty"` Address string `json:"address,omitempty"` @@ -121,7 +159,7 @@ type Metadata struct { } // ListUserData returns the metadata available from the server -func (*MetadataAPI) ListUserData() (res *UserData, err error) { +func (meta *MetadataAPI) ListUserData() (res *UserData, err error) { retries := 0 for retries <= metadataRetryBindPort 
{ port := rand.Intn(1024) @@ -140,7 +178,7 @@ func (*MetadataAPI) ListUserData() (res *UserData, err error) { }, } - resp, err := userdataClient.Get(metadataURL + "/user_data?format=json") + resp, err := userdataClient.Get(meta.getMetadataUrl() + "/user_data?format=json") if err != nil { retries++ // retry with a different source port continue @@ -158,7 +196,7 @@ func (*MetadataAPI) ListUserData() (res *UserData, err error) { } // GetUserData returns the value for the given metadata key -func (*MetadataAPI) GetUserData(key string) ([]byte, error) { +func (meta *MetadataAPI) GetUserData(key string) ([]byte, error) { if key == "" { return make([]byte, 0), errors.New("key must not be empty in GetUserData") } @@ -181,7 +219,7 @@ func (*MetadataAPI) GetUserData(key string) ([]byte, error) { }, } - resp, err := userdataClient.Get(metadataURL + "/user_data/" + key) + resp, err := userdataClient.Get(meta.getMetadataUrl() + "/user_data/" + key) if err != nil { retries++ // retry with a different source port continue @@ -199,7 +237,7 @@ func (*MetadataAPI) GetUserData(key string) ([]byte, error) { } // SetUserData sets the userdata key with the given value -func (*MetadataAPI) SetUserData(key string, value []byte) error { +func (meta *MetadataAPI) SetUserData(key string, value []byte) error { if key == "" { return errors.New("key must not be empty in SetUserData") } @@ -221,7 +259,7 @@ func (*MetadataAPI) SetUserData(key string, value []byte) error { }).DialContext, }, } - request, err := http.NewRequest("PATCH", metadataURL+"/user_data/"+key, bytes.NewBuffer(value)) + request, err := http.NewRequest("PATCH", meta.getMetadataUrl()+"/user_data/"+key, bytes.NewBuffer(value)) if err != nil { return errors.Wrap(err, "error creating patch userdata request") } @@ -238,7 +276,7 @@ func (*MetadataAPI) SetUserData(key string, value []byte) error { } // DeleteUserData deletes the userdata key and the associated value -func (*MetadataAPI) DeleteUserData(key string) error { +func (meta *MetadataAPI) DeleteUserData(key string) error { if key == "" { return errors.New("key must not be empty in DeleteUserData") } @@ -260,7 +298,7 @@ func (*MetadataAPI) DeleteUserData(key string) error { }).DialContext, }, } - request, err := http.NewRequest("DELETE", metadataURL+"/user_data/"+key, bytes.NewBuffer([]byte(""))) + request, err := http.NewRequest("DELETE", meta.getMetadataUrl()+"/user_data/"+key, bytes.NewBuffer([]byte(""))) if err != nil { return errors.Wrap(err, "error creating delete userdata request") } diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_sdk.go index c947200bed..0d497d291b 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_sdk.go @@ -39,30 +39,19 @@ var ( _ = namegenerator.GetRandomName ) -// API: instance API. -type API struct { - client *scw.Client -} - -// NewAPI returns a API object from a Scaleway client. 
-func NewAPI(client *scw.Client) *API { - return &API{ - client: client, - } -} - type Arch string const ( - ArchX86_64 = Arch("x86_64") - ArchArm = Arch("arm") - ArchArm64 = Arch("arm64") + ArchUnknownArch = Arch("unknown_arch") + ArchX86_64 = Arch("x86_64") + ArchArm = Arch("arm") + ArchArm64 = Arch("arm64") ) func (enum Arch) String() string { if enum == "" { // return default value if empty - return "x86_64" + return "unknown_arch" } return string(enum) } @@ -82,6 +71,38 @@ func (enum *Arch) UnmarshalJSON(data []byte) error { return nil } +type AttachServerVolumeRequestVolumeType string + +const ( + AttachServerVolumeRequestVolumeTypeUnknownVolumeType = AttachServerVolumeRequestVolumeType("unknown_volume_type") + AttachServerVolumeRequestVolumeTypeLSSD = AttachServerVolumeRequestVolumeType("l_ssd") + AttachServerVolumeRequestVolumeTypeBSSD = AttachServerVolumeRequestVolumeType("b_ssd") + AttachServerVolumeRequestVolumeTypeSbsVolume = AttachServerVolumeRequestVolumeType("sbs_volume") +) + +func (enum AttachServerVolumeRequestVolumeType) String() string { + if enum == "" { + // return default value if empty + return "unknown_volume_type" + } + return string(enum) +} + +func (enum AttachServerVolumeRequestVolumeType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *AttachServerVolumeRequestVolumeType) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = AttachServerVolumeRequestVolumeType(AttachServerVolumeRequestVolumeType(tmp).String()) + return nil +} + type BootType string const ( @@ -335,14 +356,15 @@ func (enum *PrivateNICState) UnmarshalJSON(data []byte) error { type SecurityGroupPolicy string const ( - SecurityGroupPolicyAccept = SecurityGroupPolicy("accept") - SecurityGroupPolicyDrop = SecurityGroupPolicy("drop") + SecurityGroupPolicyUnknownPolicy = SecurityGroupPolicy("unknown_policy") + SecurityGroupPolicyAccept = SecurityGroupPolicy("accept") + SecurityGroupPolicyDrop = SecurityGroupPolicy("drop") ) func (enum SecurityGroupPolicy) String() string { if enum == "" { // return default value if empty - return "accept" + return "unknown_policy" } return string(enum) } @@ -365,14 +387,15 @@ func (enum *SecurityGroupPolicy) UnmarshalJSON(data []byte) error { type SecurityGroupRuleAction string const ( - SecurityGroupRuleActionAccept = SecurityGroupRuleAction("accept") - SecurityGroupRuleActionDrop = SecurityGroupRuleAction("drop") + SecurityGroupRuleActionUnknownAction = SecurityGroupRuleAction("unknown_action") + SecurityGroupRuleActionAccept = SecurityGroupRuleAction("accept") + SecurityGroupRuleActionDrop = SecurityGroupRuleAction("drop") ) func (enum SecurityGroupRuleAction) String() string { if enum == "" { // return default value if empty - return "accept" + return "unknown_action" } return string(enum) } @@ -395,14 +418,15 @@ func (enum *SecurityGroupRuleAction) UnmarshalJSON(data []byte) error { type SecurityGroupRuleDirection string const ( - SecurityGroupRuleDirectionInbound = SecurityGroupRuleDirection("inbound") - SecurityGroupRuleDirectionOutbound = SecurityGroupRuleDirection("outbound") + SecurityGroupRuleDirectionUnknownDirection = SecurityGroupRuleDirection("unknown_direction") + SecurityGroupRuleDirectionInbound = SecurityGroupRuleDirection("inbound") + SecurityGroupRuleDirectionOutbound = SecurityGroupRuleDirection("outbound") ) func (enum SecurityGroupRuleDirection) String() string { if enum == "" { // return default value if empty - return 
"inbound" + return "unknown_direction" } return string(enum) } @@ -425,16 +449,17 @@ func (enum *SecurityGroupRuleDirection) UnmarshalJSON(data []byte) error { type SecurityGroupRuleProtocol string const ( - SecurityGroupRuleProtocolTCP = SecurityGroupRuleProtocol("TCP") - SecurityGroupRuleProtocolUDP = SecurityGroupRuleProtocol("UDP") - SecurityGroupRuleProtocolICMP = SecurityGroupRuleProtocol("ICMP") - SecurityGroupRuleProtocolANY = SecurityGroupRuleProtocol("ANY") + SecurityGroupRuleProtocolUnknownProtocol = SecurityGroupRuleProtocol("unknown_protocol") + SecurityGroupRuleProtocolTCP = SecurityGroupRuleProtocol("TCP") + SecurityGroupRuleProtocolUDP = SecurityGroupRuleProtocol("UDP") + SecurityGroupRuleProtocolICMP = SecurityGroupRuleProtocol("ICMP") + SecurityGroupRuleProtocolANY = SecurityGroupRuleProtocol("ANY") ) func (enum SecurityGroupRuleProtocol) String() string { if enum == "" { // return default value if empty - return "TCP" + return "unknown_protocol" } return string(enum) } @@ -581,6 +606,39 @@ func (enum *ServerIPProvisioningMode) UnmarshalJSON(data []byte) error { return nil } +type ServerIPState string + +const ( + ServerIPStateUnknownState = ServerIPState("unknown_state") + ServerIPStateDetached = ServerIPState("detached") + ServerIPStateAttached = ServerIPState("attached") + ServerIPStatePending = ServerIPState("pending") + ServerIPStateError = ServerIPState("error") +) + +func (enum ServerIPState) String() string { + if enum == "" { + // return default value if empty + return "unknown_state" + } + return string(enum) +} + +func (enum ServerIPState) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *ServerIPState) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = ServerIPState(ServerIPState(tmp).String()) + return nil +} + type ServerState string const ( @@ -750,11 +808,11 @@ type VolumeServerState string const ( VolumeServerStateAvailable = VolumeServerState("available") VolumeServerStateSnapshotting = VolumeServerState("snapshotting") - VolumeServerStateError = VolumeServerState("error") VolumeServerStateFetching = VolumeServerState("fetching") VolumeServerStateResizing = VolumeServerState("resizing") VolumeServerStateSaving = VolumeServerState("saving") VolumeServerStateHotsyncing = VolumeServerState("hotsyncing") + VolumeServerStateError = VolumeServerState("error") ) func (enum VolumeServerState) String() string { @@ -817,11 +875,11 @@ type VolumeState string const ( VolumeStateAvailable = VolumeState("available") VolumeStateSnapshotting = VolumeState("snapshotting") - VolumeStateError = VolumeState("error") VolumeStateFetching = VolumeState("fetching") VolumeStateResizing = VolumeState("resizing") VolumeStateSaving = VolumeState("saving") VolumeStateHotsyncing = VolumeState("hotsyncing") + VolumeStateError = VolumeState("error") ) func (enum VolumeState) String() string { @@ -850,11 +908,12 @@ func (enum *VolumeState) UnmarshalJSON(data []byte) error { type VolumeVolumeType string const ( - VolumeVolumeTypeLSSD = VolumeVolumeType("l_ssd") - VolumeVolumeTypeBSSD = VolumeVolumeType("b_ssd") - VolumeVolumeTypeUnified = VolumeVolumeType("unified") - VolumeVolumeTypeScratch = VolumeVolumeType("scratch") - VolumeVolumeTypeSbsVolume = VolumeVolumeType("sbs_volume") + VolumeVolumeTypeLSSD = VolumeVolumeType("l_ssd") + VolumeVolumeTypeBSSD = VolumeVolumeType("b_ssd") + VolumeVolumeTypeUnified = VolumeVolumeType("unified") + VolumeVolumeTypeScratch 
= VolumeVolumeType("scratch") + VolumeVolumeTypeSbsVolume = VolumeVolumeType("sbs_volume") + VolumeVolumeTypeSbsSnapshot = VolumeVolumeType("sbs_snapshot") ) func (enum VolumeVolumeType) String() string { @@ -880,73 +939,710 @@ func (enum *VolumeVolumeType) UnmarshalJSON(data []byte) error { return nil } +// ServerSummary: server summary. +type ServerSummary struct { + ID string `json:"id"` + + Name string `json:"name"` +} + // Bootscript: bootscript. type Bootscript struct { // Bootcmdargs: bootscript arguments. Bootcmdargs string `json:"bootcmdargs"` + // Default: display if the bootscript is the default bootscript (if no other boot option is configured). Default bool `json:"default"` + // Dtb: provide information regarding a Device Tree Binary (DTB) for use with C1 servers. Dtb string `json:"dtb"` + // ID: bootscript ID. ID string `json:"id"` + // Initrd: initrd (initial ramdisk) configuration. Initrd string `json:"initrd"` + // Kernel: instance kernel version. Kernel string `json:"kernel"` + // Organization: bootscript Organization ID. Organization string `json:"organization"` + // Project: bootscript Project ID. Project string `json:"project"` + // Public: provide information if the bootscript is public. Public bool `json:"public"` + // Title: bootscript title. Title string `json:"title"` + // Arch: bootscript architecture. - // Default value: x86_64 + // Default value: unknown_arch Arch Arch `json:"arch"` + // Zone: zone in which the bootscript is located. Zone scw.Zone `json:"zone"` } -type CreateIPResponse struct { - IP *IP `json:"ip"` +// Volume: volume. +type Volume struct { + // ID: volume unique ID. + ID string `json:"id"` + + // Name: volume name. + Name string `json:"name"` + + // Deprecated: ExportURI: show the volume NBD export URI. + ExportURI *string `json:"export_uri"` + + // Size: volume disk size. + Size scw.Size `json:"size"` + + // VolumeType: volume type. + // Default value: l_ssd + VolumeType VolumeVolumeType `json:"volume_type"` + + // CreationDate: volume creation date. + CreationDate *time.Time `json:"creation_date"` + + // ModificationDate: volume modification date. + ModificationDate *time.Time `json:"modification_date"` + + // Organization: volume Organization ID. + Organization string `json:"organization"` + + // Project: volume Project ID. + Project string `json:"project"` + + // Tags: volume tags. + Tags []string `json:"tags"` + + // Server: instance attached to the volume. + Server *ServerSummary `json:"server"` + + // State: volume state. + // Default value: available + State VolumeState `json:"state"` + + // Zone: zone in which the volume is located. + Zone scw.Zone `json:"zone"` } -type CreateImageResponse struct { +// VolumeSummary: volume summary. +type VolumeSummary struct { + ID string `json:"id"` + + Name string `json:"name"` + + Size scw.Size `json:"size"` + + // VolumeType: default value: l_ssd + VolumeType VolumeVolumeType `json:"volume_type"` +} + +// ServerTypeNetworkInterface: server type network interface. +type ServerTypeNetworkInterface struct { + // InternalBandwidth: maximum internal bandwidth in bits per seconds. + InternalBandwidth *uint64 `json:"internal_bandwidth"` + + // InternetBandwidth: maximum internet bandwidth in bits per seconds. + InternetBandwidth *uint64 `json:"internet_bandwidth"` +} + +// ServerTypeVolumeConstraintSizes: server type volume constraint sizes. +type ServerTypeVolumeConstraintSizes struct { + // MinSize: minimum volume size in bytes. 
+ MinSize scw.Size `json:"min_size"` + + // MaxSize: maximum volume size in bytes. + MaxSize scw.Size `json:"max_size"` +} + +// Image: image. +type Image struct { + ID string `json:"id"` + + Name string `json:"name"` + + // Arch: default value: unknown_arch + Arch Arch `json:"arch"` + + CreationDate *time.Time `json:"creation_date"` + + ModificationDate *time.Time `json:"modification_date"` + + // Deprecated + DefaultBootscript *Bootscript `json:"default_bootscript"` + + ExtraVolumes map[string]*Volume `json:"extra_volumes"` + + FromServer string `json:"from_server"` + + Organization string `json:"organization"` + + Public bool `json:"public"` + + RootVolume *VolumeSummary `json:"root_volume"` + + // State: default value: available + State ImageState `json:"state"` + + Project string `json:"project"` + + Tags []string `json:"tags"` + + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"zone"` +} + +// PlacementGroup: placement group. +type PlacementGroup struct { + // ID: placement group unique ID. + ID string `json:"id"` + + // Name: placement group name. + Name string `json:"name"` + + // Organization: placement group Organization ID. + Organization string `json:"organization"` + + // Project: placement group Project ID. + Project string `json:"project"` + + // Tags: placement group tags. + Tags []string `json:"tags"` + + // PolicyMode: select the failure mode when the placement cannot be respected, either optional or enforced. + // Default value: optional + PolicyMode PlacementGroupPolicyMode `json:"policy_mode"` + + // PolicyType: select the behavior of the placement group, either low_latency (group) or max_availability (spread). + // Default value: max_availability + PolicyType PlacementGroupPolicyType `json:"policy_type"` + + // PolicyRespected: returns true if the policy is respected, false otherwise. + PolicyRespected bool `json:"policy_respected"` + + // Zone: zone in which the placement group is located. + Zone scw.Zone `json:"zone"` +} + +// PrivateNIC: private nic. +type PrivateNIC struct { + // ID: private NIC unique ID. + ID string `json:"id"` + + // ServerID: instance to which the private NIC is attached. + ServerID string `json:"server_id"` + + // PrivateNetworkID: private Network the private NIC is attached to. + PrivateNetworkID string `json:"private_network_id"` + + // MacAddress: private NIC MAC address. + MacAddress string `json:"mac_address"` + + // State: private NIC state. + // Default value: available + State PrivateNICState `json:"state"` + + // Tags: private NIC tags. + Tags []string `json:"tags"` +} + +// SecurityGroupSummary: security group summary. +type SecurityGroupSummary struct { + ID string `json:"id"` + + Name string `json:"name"` +} + +// ServerIP: server ip. +type ServerIP struct { + // ID: unique ID of the IP address. + ID string `json:"id"` + + // Address: instance's public IP-Address. + Address net.IP `json:"address"` + + // Gateway: gateway's IP address. + Gateway net.IP `json:"gateway"` + + // Netmask: cIDR netmask. + Netmask string `json:"netmask"` + + // Family: IP address family (inet or inet6). + // Default value: inet + Family ServerIPIPFamily `json:"family"` + + // Dynamic: true if the IP address is dynamic. + Dynamic bool `json:"dynamic"` + + // ProvisioningMode: information about this address provisioning mode. + // Default value: manual + ProvisioningMode ServerIPProvisioningMode `json:"provisioning_mode"` + + // Tags: tags associated with the IP. 
+ Tags []string `json:"tags"` + + // State: default value: unknown_state + State ServerIPState `json:"state"` +} + +// ServerIPv6: server i pv6. +type ServerIPv6 struct { + // Address: instance IPv6 IP-Address. + Address net.IP `json:"address"` + + // Gateway: iPv6 IP-addresses gateway. + Gateway net.IP `json:"gateway"` + + // Netmask: iPv6 IP-addresses CIDR netmask. + Netmask string `json:"netmask"` +} + +// ServerLocation: server location. +type ServerLocation struct { + ClusterID string `json:"cluster_id"` + + HypervisorID string `json:"hypervisor_id"` + + NodeID string `json:"node_id"` + + PlatformID string `json:"platform_id"` + + ZoneID string `json:"zone_id"` +} + +// ServerMaintenance: server maintenance. +type ServerMaintenance struct { + Reason string `json:"reason"` +} + +// VolumeServer: volume server. +type VolumeServer struct { + ID string `json:"id"` + + Name string `json:"name"` + + ExportURI string `json:"export_uri"` + + Organization string `json:"organization"` + + Server *ServerSummary `json:"server"` + + Size scw.Size `json:"size"` + + // VolumeType: default value: l_ssd + VolumeType VolumeServerVolumeType `json:"volume_type"` + + CreationDate *time.Time `json:"creation_date"` + + ModificationDate *time.Time `json:"modification_date"` + + // State: default value: available + State VolumeServerState `json:"state"` + + Project string `json:"project"` + + Boot bool `json:"boot"` + + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"zone"` +} + +// SnapshotBaseVolume: snapshot base volume. +type SnapshotBaseVolume struct { + // ID: volume ID on which the snapshot is based. + ID string `json:"id"` + + // Name: volume name on which the snapshot is based on. + Name string `json:"name"` +} + +// ServerTypeCapabilities: server type capabilities. +type ServerTypeCapabilities struct { + // BlockStorage: defines whether the Instance supports block storage. + BlockStorage *bool `json:"block_storage"` + + // BootTypes: list of supported boot types. + BootTypes []BootType `json:"boot_types"` +} + +// ServerTypeNetwork: server type network. +type ServerTypeNetwork struct { + // Interfaces: list of available network interfaces. + Interfaces []*ServerTypeNetworkInterface `json:"interfaces"` + + // SumInternalBandwidth: total maximum internal bandwidth in bits per seconds. + SumInternalBandwidth *uint64 `json:"sum_internal_bandwidth"` + + // SumInternetBandwidth: total maximum internet bandwidth in bits per seconds. + SumInternetBandwidth *uint64 `json:"sum_internet_bandwidth"` + + // IPv6Support: true if IPv6 is enabled. + IPv6Support bool `json:"ipv6_support"` +} + +// ServerTypeVolumeConstraintsByType: server type volume constraints by type. +type ServerTypeVolumeConstraintsByType struct { + // LSSD: local SSD volumes. + LSSD *ServerTypeVolumeConstraintSizes `json:"l_ssd"` +} + +// VolumeTypeCapabilities: volume type capabilities. +type VolumeTypeCapabilities struct { + Snapshot bool `json:"snapshot"` +} + +// VolumeTypeConstraints: volume type constraints. +type VolumeTypeConstraints struct { + Min scw.Size `json:"min"` + + Max scw.Size `json:"max"` +} + +// Server: server. +type Server struct { + // ID: instance unique ID. + ID string `json:"id"` + + // Name: instance name. + Name string `json:"name"` + + // Organization: instance Organization ID. + Organization string `json:"organization"` + + // Project: instance Project ID. + Project string `json:"project"` + + // AllowedActions: list of allowed actions on the Instance. 
+ AllowedActions []ServerAction `json:"allowed_actions"` + + // Tags: tags associated with the Instance. + Tags []string `json:"tags"` + + // CommercialType: instance commercial type (eg. GP1-M). + CommercialType string `json:"commercial_type"` + + // CreationDate: instance creation date. + CreationDate *time.Time `json:"creation_date"` + + // DynamicIPRequired: true if a dynamic IPv4 is required. + DynamicIPRequired bool `json:"dynamic_ip_required"` + + // RoutedIPEnabled: true to configure the instance so it uses the new routed IP mode. + RoutedIPEnabled bool `json:"routed_ip_enabled"` + + // EnableIPv6: true if IPv6 is enabled. + EnableIPv6 bool `json:"enable_ipv6"` + + // Hostname: instance host name. + Hostname string `json:"hostname"` + + // Image: information about the Instance image. Image *Image `json:"image"` -} -type CreatePlacementGroupResponse struct { + // Protected: defines whether the Instance protection option is activated. + Protected bool `json:"protected"` + + // PrivateIP: private IP address of the Instance. + PrivateIP *string `json:"private_ip"` + + // PublicIP: information about the public IP. + PublicIP *ServerIP `json:"public_ip"` + + // PublicIPs: information about all the public IPs attached to the server. + PublicIPs []*ServerIP `json:"public_ips"` + + // MacAddress: the server's MAC address. + MacAddress string `json:"mac_address"` + + // ModificationDate: instance modification date. + ModificationDate *time.Time `json:"modification_date"` + + // State: instance state. + // Default value: running + State ServerState `json:"state"` + + // Location: instance location. + Location *ServerLocation `json:"location"` + + // IPv6: instance IPv6 address. + IPv6 *ServerIPv6 `json:"ipv6"` + + // Deprecated: Bootscript: instance bootscript. + Bootscript *Bootscript `json:"bootscript"` + + // BootType: instance boot type. + // Default value: local + BootType BootType `json:"boot_type"` + + // Volumes: instance volumes. + Volumes map[string]*VolumeServer `json:"volumes"` + + // SecurityGroup: instance security group. + SecurityGroup *SecurityGroupSummary `json:"security_group"` + + // Maintenances: instance planned maintenance. + Maintenances []*ServerMaintenance `json:"maintenances"` + + // StateDetail: detailed information about the Instance state. + StateDetail string `json:"state_detail"` + + // Arch: instance architecture. + // Default value: unknown_arch + Arch Arch `json:"arch"` + + // PlacementGroup: instance placement group. PlacementGroup *PlacementGroup `json:"placement_group"` + + // PrivateNics: instance private NICs. + PrivateNics []*PrivateNIC `json:"private_nics"` + + // Zone: zone in which the Instance is located. + Zone scw.Zone `json:"zone"` } -type CreatePrivateNICResponse struct { - PrivateNic *PrivateNIC `json:"private_nic"` +// IP: ip. +type IP struct { + ID string `json:"id"` + + Address net.IP `json:"address"` + + Reverse *string `json:"reverse"` + + Server *ServerSummary `json:"server"` + + Organization string `json:"organization"` + + Tags []string `json:"tags"` + + Project string `json:"project"` + + // Type: default value: unknown_iptype + Type IPType `json:"type"` + + // State: default value: unknown_state + State IPState `json:"state"` + + Prefix scw.IPNet `json:"prefix"` + + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"zone"` } -type CreateSecurityGroupResponse struct { - SecurityGroup *SecurityGroup `json:"security_group"` +// VolumeTemplate: volume template. 
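The VolumeTemplate type defined just below is what CreateImageRequest.ExtraVolumes, further down in this file, takes as map values. A sketch of populating one, assuming the scw.GB size constant and the scw.StringPtr helper from the core package:

package main

import (
	"fmt"

	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// Extra image volumes are keyed by slot index.
	extra := map[string]*instance.VolumeTemplate{
		"1": {
			Name:       "data-volume",
			Size:       20 * scw.GB, // must stay a multiple of 512 bytes
			VolumeType: instance.VolumeVolumeTypeLSSD,
			// Precisely one of Project or Organization may be set.
			Project: scw.StringPtr("11111111-1111-1111-1111-111111111111"),
		},
	}
	fmt.Printf("%+v\n", extra["1"])
}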
+type VolumeTemplate struct { + // ID: UUID of the volume. + ID string `json:"id,omitempty"` + + // Name: name of the volume. + Name string `json:"name,omitempty"` + + // Size: disk size of the volume, must be a multiple of 512. + Size scw.Size `json:"size,omitempty"` + + // VolumeType: type of the volume. + // Default value: l_ssd + VolumeType VolumeVolumeType `json:"volume_type,omitempty"` + + // Deprecated: Organization: organization ID of the volume. + // Precisely one of Project, Organization must be set. + Organization *string `json:"organization,omitempty"` + + // Project: project ID of the volume. + // Precisely one of Project, Organization must be set. + Project *string `json:"project,omitempty"` } -type CreateSecurityGroupRuleResponse struct { - Rule *SecurityGroupRule `json:"rule"` +// SecurityGroup: security group. +type SecurityGroup struct { + // ID: security group unique ID. + ID string `json:"id"` + + // Name: security group name. + Name string `json:"name"` + + // Description: security group description. + Description string `json:"description"` + + // EnableDefaultSecurity: true if SMTP is blocked on IPv4 and IPv6. This feature is read only, please open a support ticket if you need to make it configurable. + EnableDefaultSecurity bool `json:"enable_default_security"` + + // InboundDefaultPolicy: default inbound policy. + // Default value: unknown_policy + InboundDefaultPolicy SecurityGroupPolicy `json:"inbound_default_policy"` + + // OutboundDefaultPolicy: default outbound policy. + // Default value: unknown_policy + OutboundDefaultPolicy SecurityGroupPolicy `json:"outbound_default_policy"` + + // Organization: security group Organization ID. + Organization string `json:"organization"` + + // Project: security group Project ID. + Project string `json:"project"` + + // Tags: security group tags. + Tags []string `json:"tags"` + + // Deprecated: OrganizationDefault: true if it is your default security group for this Organization ID. + OrganizationDefault *bool `json:"organization_default"` + + // ProjectDefault: true if it is your default security group for this Project ID. + ProjectDefault bool `json:"project_default"` + + // CreationDate: security group creation date. + CreationDate *time.Time `json:"creation_date"` + + // ModificationDate: security group modification date. + ModificationDate *time.Time `json:"modification_date"` + + // Servers: list of Instances attached to this security group. + Servers []*ServerSummary `json:"servers"` + + // Stateful: defines whether the security group is stateful. + Stateful bool `json:"stateful"` + + // State: security group state. + // Default value: available + State SecurityGroupState `json:"state"` + + // Zone: zone in which the security group is located. + Zone scw.Zone `json:"zone"` } -type CreateServerResponse struct { - Server *Server `json:"server"` +// SecurityGroupRule: security group rule. +type SecurityGroupRule struct { + ID string `json:"id"` + + // Protocol: default value: unknown_protocol + Protocol SecurityGroupRuleProtocol `json:"protocol"` + + // Direction: default value: unknown_direction + Direction SecurityGroupRuleDirection `json:"direction"` + + // Action: default value: unknown_action + Action SecurityGroupRuleAction `json:"action"` + + IPRange scw.IPNet `json:"ip_range"` + + DestPortFrom *uint32 `json:"dest_port_from"` + + DestPortTo *uint32 `json:"dest_port_to"` + + Position uint32 `json:"position"` + + Editable bool `json:"editable"` + + // Zone: zone to target. 
If none is passed will use default zone from the config. + Zone scw.Zone `json:"zone"` } -type CreateSnapshotResponse struct { - Snapshot *Snapshot `json:"snapshot"` +// VolumeServerTemplate: volume server template. +type VolumeServerTemplate struct { + // ID: UUID of the volume. + ID *string `json:"id,omitempty"` - Task *Task `json:"task"` + // Boot: force the Instance to boot on this volume. + Boot *bool `json:"boot,omitempty"` + + // Name: name of the volume. + Name *string `json:"name,omitempty"` + + // Size: disk size of the volume, must be a multiple of 512. + Size *scw.Size `json:"size,omitempty"` + + // VolumeType: type of the volume. + // Default value: l_ssd + VolumeType VolumeVolumeType `json:"volume_type,omitempty"` + + // BaseSnapshot: ID of the snapshot on which this volume will be based. + BaseSnapshot *string `json:"base_snapshot,omitempty"` + + // Organization: organization ID of the volume. + Organization *string `json:"organization,omitempty"` + + // Project: project ID of the volume. + Project *string `json:"project,omitempty"` } -type CreateVolumeResponse struct { - Volume *Volume `json:"volume"` +// Snapshot: snapshot. +type Snapshot struct { + // ID: snapshot ID. + ID string `json:"id"` + + // Name: snapshot name. + Name string `json:"name"` + + // Organization: snapshot Organization ID. + Organization string `json:"organization"` + + // Project: snapshot Project ID. + Project string `json:"project"` + + // Tags: snapshot tags. + Tags []string `json:"tags"` + + // VolumeType: snapshot volume type. + // Default value: l_ssd + VolumeType VolumeVolumeType `json:"volume_type"` + + // Size: snapshot size. + Size scw.Size `json:"size"` + + // State: snapshot state. + // Default value: available + State SnapshotState `json:"state"` + + // BaseVolume: volume on which the snapshot is based on. + BaseVolume *SnapshotBaseVolume `json:"base_volume"` + + // CreationDate: snapshot creation date. + CreationDate *time.Time `json:"creation_date"` + + // ModificationDate: snapshot modification date. + ModificationDate *time.Time `json:"modification_date"` + + // Zone: snapshot zone. + Zone scw.Zone `json:"zone"` + + // ErrorReason: reason for the failed snapshot import. + ErrorReason *string `json:"error_reason"` } +// Task: task. +type Task struct { + // ID: unique ID of the task. + ID string `json:"id"` + + // Description: description of the task. + Description string `json:"description"` + + // Progress: progress of the task in percent. + Progress int32 `json:"progress"` + + // StartedAt: task start date. + StartedAt *time.Time `json:"started_at"` + + // TerminatedAt: task end date. + TerminatedAt *time.Time `json:"terminated_at"` + + // Status: task status. + // Default value: pending + Status TaskStatus `json:"status"` + + HrefFrom string `json:"href_from"` + + HrefResult string `json:"href_result"` + + // Zone: zone in which the task is excecuted. + Zone scw.Zone `json:"zone"` +} + +// Dashboard: dashboard. type Dashboard struct { VolumesCount uint32 `json:"volumes_count"` @@ -979,52 +1675,821 @@ type Dashboard struct { PlacementGroupsCount uint32 `json:"placement_groups_count"` } +// PlacementGroupServer: placement group server. +type PlacementGroupServer struct { + // ID: instance UUID. + ID string `json:"id"` + + // Name: instance name. + Name string `json:"name"` + + // PolicyRespected: defines whether the placement group policy is respected (either 1 or 0). 
+ PolicyRespected bool `json:"policy_respected"` +} + +// GetServerTypesAvailabilityResponseAvailability: get server types availability response availability. +type GetServerTypesAvailabilityResponseAvailability struct { + // Availability: default value: available + Availability ServerTypesAvailability `json:"availability"` +} + +// ServerType: server type. +type ServerType struct { + // Deprecated: MonthlyPrice: estimated monthly price, for a 30 days month, in Euro. + MonthlyPrice *float32 `json:"monthly_price"` + + // HourlyPrice: hourly price in Euro. + HourlyPrice float32 `json:"hourly_price"` + + // AltNames: alternative Instance name, if any. + AltNames []string `json:"alt_names"` + + // PerVolumeConstraint: additional volume constraints. + PerVolumeConstraint *ServerTypeVolumeConstraintsByType `json:"per_volume_constraint"` + + // VolumesConstraint: initial volume constraints. + VolumesConstraint *ServerTypeVolumeConstraintSizes `json:"volumes_constraint"` + + // Ncpus: number of CPU. + Ncpus uint32 `json:"ncpus"` + + // Gpu: number of GPU. + Gpu *uint64 `json:"gpu"` + + // RAM: available RAM in bytes. + RAM uint64 `json:"ram"` + + // Arch: CPU architecture. + // Default value: unknown_arch + Arch Arch `json:"arch"` + + // Baremetal: true if it is a baremetal Instance. + Baremetal bool `json:"baremetal"` + + // Network: network available for the Instance. + Network *ServerTypeNetwork `json:"network"` + + // Capabilities: capabilities. + Capabilities *ServerTypeCapabilities `json:"capabilities"` + + // ScratchStorageMaxSize: maximum available scratch storage. + ScratchStorageMaxSize *scw.Size `json:"scratch_storage_max_size"` +} + +// VolumeType: volume type. +type VolumeType struct { + DisplayName string `json:"display_name"` + + Capabilities *VolumeTypeCapabilities `json:"capabilities"` + + Constraints *VolumeTypeConstraints `json:"constraints"` +} + +// ServerActionRequestVolumeBackupTemplate: server action request volume backup template. +type ServerActionRequestVolumeBackupTemplate struct { + // VolumeType: overrides the `volume_type` of the snapshot for this volume. + // If omitted, the volume type of the original volume will be used. + // Default value: unknown_volume_type + VolumeType SnapshotVolumeType `json:"volume_type,omitempty"` +} + +// SetSecurityGroupRulesRequestRule: set security group rules request rule. +type SetSecurityGroupRulesRequestRule struct { + // ID: UUID of the security rule to update. If no value is provided, a new rule will be created. + ID *string `json:"id"` + + // Action: action to apply when the rule matches a packet. + // Default value: unknown_action + Action SecurityGroupRuleAction `json:"action"` + + // Protocol: protocol family this rule applies to. + // Default value: unknown_protocol + Protocol SecurityGroupRuleProtocol `json:"protocol"` + + // Direction: direction the rule applies to. + // Default value: unknown_direction + Direction SecurityGroupRuleDirection `json:"direction"` + + // IPRange: range of IP addresses these rules apply to. + IPRange scw.IPNet `json:"ip_range"` + + // DestPortFrom: beginning of the range of ports this rule applies to (inclusive). This value will be set to null if protocol is ICMP or ANY. + DestPortFrom *uint32 `json:"dest_port_from"` + + // DestPortTo: end of the range of ports this rule applies to (inclusive). This value will be set to null if protocol is ICMP or ANY, or if it is equal to dest_port_from. 
+ DestPortTo *uint32 `json:"dest_port_to"` + + // Position: position of this rule in the security group rules list. If several rules are passed with the same position, the resulting order is undefined. + Position uint32 `json:"position"` + + // Editable: indicates if this rule is editable. Rules with the value false will be ignored. + Editable *bool `json:"editable"` + + // Zone: zone of the rule. This field is ignored. + Zone *scw.Zone `json:"zone"` +} + +// NullableStringValue: nullable string value. +type NullableStringValue struct { + Null bool `json:"null,omitempty"` + + Value string `json:"value,omitempty"` +} + +// VolumeImageUpdateTemplate: volume image update template. +type VolumeImageUpdateTemplate struct { + // ID: UUID of the snapshot. + ID string `json:"id,omitempty"` +} + +// SecurityGroupTemplate: security group template. +type SecurityGroupTemplate struct { + ID string `json:"id,omitempty"` + + Name string `json:"name,omitempty"` +} + +// ApplyBlockMigrationRequest: apply block migration request. +type ApplyBlockMigrationRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: the volume to migrate, along with potentially other resources, according to the migration plan generated with a call to PlanBlockMigration. + // Precisely one of VolumeID, SnapshotID must be set. + VolumeID *string `json:"volume_id,omitempty"` + + // SnapshotID: the snapshot to migrate, along with potentially other resources, according to the migration plan generated with a call to PlanBlockMigration. + // Precisely one of VolumeID, SnapshotID must be set. + SnapshotID *string `json:"snapshot_id,omitempty"` + + // ValidationKey: a value to be retrieved from a call to PlanBlockMigration, to confirm that the volume and/or snapshots specified in said plan should be migrated. + ValidationKey string `json:"validation_key,omitempty"` +} + +// AttachServerVolumeRequest: attach server volume request. +type AttachServerVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + ServerID string `json:"-"` + + VolumeID string `json:"volume_id,omitempty"` + + // VolumeType: default value: unknown_volume_type + VolumeType AttachServerVolumeRequestVolumeType `json:"volume_type,omitempty"` + + Boot *bool `json:"boot,omitempty"` +} + +// AttachServerVolumeResponse: attach server volume response. +type AttachServerVolumeResponse struct { + Server *Server `json:"server"` +} + +// CreateIPRequest: create ip request. +type CreateIPRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Deprecated: Organization: organization ID in which the IP is reserved. + // Precisely one of Project, Organization must be set. + Organization *string `json:"organization,omitempty"` + + // Project: project ID in which the IP is reserved. + // Precisely one of Project, Organization must be set. + Project *string `json:"project,omitempty"` + + // Tags: tags of the IP. + Tags []string `json:"tags,omitempty"` + + // Server: UUID of the Instance you want to attach the IP to. + Server *string `json:"server,omitempty"` + + // Type: IP type to reserve (either 'nat', 'routed_ipv4' or 'routed_ipv6'). + // Default value: unknown_iptype + Type IPType `json:"type,omitempty"` +} + +// CreateIPResponse: create ip response. 
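The CreateIPRequest defined above accepts the routed IP types ('routed_ipv4', 'routed_ipv6') next to 'nat'. A sketch of reserving one, assuming the instance package keeps its usual NewAPI constructor and a CreateIP method for this request/response pair, neither of which appears in this hunk:

package main

import (
	"fmt"

	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"), // placeholder credentials
		scw.WithDefaultProjectID("11111111-1111-1111-1111-111111111111"),
	)
	if err != nil {
		panic(err)
	}
	api := instance.NewAPI(client) // assumed constructor; CreateIP assumed method

	// The string literal matches the values listed in the doc comment above;
	// the corresponding IPType constant names are not shown in this hunk.
	resp, err := api.CreateIP(&instance.CreateIPRequest{
		Zone: scw.ZoneFrPar1,
		Type: "routed_ipv4",
		Tags: []string{"example"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("reserved IP: %+v\n", resp.IP)
}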
+type CreateIPResponse struct { + IP *IP `json:"ip"` +} + +// CreateImageRequest: create image request. +type CreateImageRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: name of the image. + Name string `json:"name,omitempty"` + + // RootVolume: UUID of the snapshot. + RootVolume string `json:"root_volume,omitempty"` + + // Arch: architecture of the image. + // Default value: unknown_arch + Arch Arch `json:"arch,omitempty"` + + // Deprecated: DefaultBootscript: default bootscript of the image. + DefaultBootscript *string `json:"default_bootscript,omitempty"` + + // ExtraVolumes: additional volumes of the image. + ExtraVolumes map[string]*VolumeTemplate `json:"extra_volumes,omitempty"` + + // Deprecated: Organization: organization ID of the image. + // Precisely one of Project, Organization must be set. + Organization *string `json:"organization,omitempty"` + + // Project: project ID of the image. + // Precisely one of Project, Organization must be set. + Project *string `json:"project,omitempty"` + + // Tags: tags of the image. + Tags []string `json:"tags,omitempty"` + + // Public: true to create a public image. + Public *bool `json:"public,omitempty"` +} + +// CreateImageResponse: create image response. +type CreateImageResponse struct { + Image *Image `json:"image"` +} + +// CreatePlacementGroupRequest: create placement group request. +type CreatePlacementGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: name of the placement group. + Name string `json:"name,omitempty"` + + // Deprecated: Organization: organization ID of the placement group. + // Precisely one of Project, Organization must be set. + Organization *string `json:"organization,omitempty"` + + // Project: project ID of the placement group. + // Precisely one of Project, Organization must be set. + Project *string `json:"project,omitempty"` + + // Tags: tags of the placement group. + Tags []string `json:"tags,omitempty"` + + // PolicyMode: operating mode of the placement group. + // Default value: optional + PolicyMode PlacementGroupPolicyMode `json:"policy_mode,omitempty"` + + // PolicyType: policy type of the placement group. + // Default value: max_availability + PolicyType PlacementGroupPolicyType `json:"policy_type,omitempty"` +} + +// CreatePlacementGroupResponse: create placement group response. +type CreatePlacementGroupResponse struct { + PlacementGroup *PlacementGroup `json:"placement_group"` +} + +// CreatePrivateNICRequest: create private nic request. +type CreatePrivateNICRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: UUID of the Instance the private NIC will be attached to. + ServerID string `json:"-"` + + // PrivateNetworkID: UUID of the private network where the private NIC will be attached. + PrivateNetworkID string `json:"private_network_id,omitempty"` + + // Tags: private NIC tags. + Tags []string `json:"tags,omitempty"` + + // IPIDs: ip_ids defined from IPAM. + IPIDs []string `json:"ip_ids,omitempty"` +} + +// CreatePrivateNICResponse: create private nic response. +type CreatePrivateNICResponse struct { + PrivateNic *PrivateNIC `json:"private_nic"` +} + +// CreateSecurityGroupRequest: create security group request. +type CreateSecurityGroupRequest struct { + // Zone: zone to target. 
If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: name of the security group. + Name string `json:"name,omitempty"` + + // Description: description of the security group. + Description string `json:"description,omitempty"` + + // Deprecated: Organization: organization ID the security group belongs to. + // Precisely one of Project, Organization must be set. + Organization *string `json:"organization,omitempty"` + + // Project: project ID the security group belong to. + // Precisely one of Project, Organization must be set. + Project *string `json:"project,omitempty"` + + // Tags: tags of the security group. + Tags []string `json:"tags,omitempty"` + + // Deprecated: OrganizationDefault: defines whether this security group becomes the default security group for new Instances. + // Precisely one of OrganizationDefault, ProjectDefault must be set. + OrganizationDefault *bool `json:"organization_default,omitempty"` + + // ProjectDefault: whether this security group becomes the default security group for new Instances. + // Precisely one of OrganizationDefault, ProjectDefault must be set. + ProjectDefault *bool `json:"project_default,omitempty"` + + // Stateful: whether the security group is stateful or not. + Stateful bool `json:"stateful,omitempty"` + + // InboundDefaultPolicy: default policy for inbound rules. + // Default value: unknown_policy + InboundDefaultPolicy SecurityGroupPolicy `json:"inbound_default_policy,omitempty"` + + // OutboundDefaultPolicy: default policy for outbound rules. + // Default value: unknown_policy + OutboundDefaultPolicy SecurityGroupPolicy `json:"outbound_default_policy,omitempty"` + + // EnableDefaultSecurity: true to block SMTP on IPv4 and IPv6. This feature is read only, please open a support ticket if you need to make it configurable. + EnableDefaultSecurity *bool `json:"enable_default_security,omitempty"` +} + +// CreateSecurityGroupResponse: create security group response. +type CreateSecurityGroupResponse struct { + SecurityGroup *SecurityGroup `json:"security_group"` +} + +// CreateSecurityGroupRuleRequest: create security group rule request. +type CreateSecurityGroupRuleRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SecurityGroupID: UUID of the security group. + SecurityGroupID string `json:"-"` + + // Protocol: default value: unknown_protocol + Protocol SecurityGroupRuleProtocol `json:"protocol,omitempty"` + + // Direction: default value: unknown_direction + Direction SecurityGroupRuleDirection `json:"direction,omitempty"` + + // Action: default value: unknown_action + Action SecurityGroupRuleAction `json:"action,omitempty"` + + IPRange scw.IPNet `json:"ip_range,omitempty"` + + // DestPortFrom: beginning of the range of ports to apply this rule to (inclusive). + DestPortFrom *uint32 `json:"dest_port_from,omitempty"` + + // DestPortTo: end of the range of ports to apply this rule to (inclusive). + DestPortTo *uint32 `json:"dest_port_to,omitempty"` + + // Position: position of this rule in the security group rules list. + Position uint32 `json:"position,omitempty"` + + // Editable: indicates if this rule is editable (will be ignored). + Editable bool `json:"editable,omitempty"` +} + +// CreateSecurityGroupRuleResponse: create security group rule response. +type CreateSecurityGroupRuleResponse struct { + Rule *SecurityGroupRule `json:"rule"` +} + +// CreateServerRequest: create server request. 
+type CreateServerRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: instance name. + Name string `json:"name,omitempty"` + + // DynamicIPRequired: define if a dynamic IPv4 is required for the Instance. + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + + // RoutedIPEnabled: if true, configure the Instance so it uses the new routed IP mode. + RoutedIPEnabled *bool `json:"routed_ip_enabled,omitempty"` + + // CommercialType: define the Instance commercial type (i.e. GP1-S). + CommercialType string `json:"commercial_type,omitempty"` + + // Image: instance image ID or label. + Image string `json:"image,omitempty"` + + // Volumes: volumes attached to the server. + Volumes map[string]*VolumeServerTemplate `json:"volumes,omitempty"` + + // EnableIPv6: true if IPv6 is enabled on the server. + EnableIPv6 bool `json:"enable_ipv6,omitempty"` + + // PublicIP: ID of the reserved IP to attach to the Instance. + PublicIP *string `json:"public_ip,omitempty"` + + // PublicIPs: a list of reserved IP IDs to attach to the Instance. + PublicIPs *[]string `json:"public_ips,omitempty"` + + // BootType: boot type to use. + // Default value: local + BootType *BootType `json:"boot_type,omitempty"` + + // Deprecated: Bootscript: bootscript ID to use when `boot_type` is set to `bootscript`. + Bootscript *string `json:"bootscript,omitempty"` + + // Deprecated: Organization: instance Organization ID. + // Precisely one of Project, Organization must be set. + Organization *string `json:"organization,omitempty"` + + // Project: instance Project ID. + // Precisely one of Project, Organization must be set. + Project *string `json:"project,omitempty"` + + // Tags: instance tags. + Tags []string `json:"tags,omitempty"` + + // SecurityGroup: security group ID. + SecurityGroup *string `json:"security_group,omitempty"` + + // PlacementGroup: placement group ID if Instance must be part of a placement group. + PlacementGroup *string `json:"placement_group,omitempty"` +} + +// CreateServerResponse: create server response. +type CreateServerResponse struct { + Server *Server `json:"server"` +} + +// CreateSnapshotRequest: create snapshot request. +type CreateSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: name of the snapshot. + Name string `json:"name,omitempty"` + + // VolumeID: UUID of the volume. + VolumeID *string `json:"volume_id,omitempty"` + + // Tags: tags of the snapshot. + Tags *[]string `json:"tags,omitempty"` + + // Deprecated: Organization: organization ID of the snapshot. + // Precisely one of Project, Organization must be set. + Organization *string `json:"organization,omitempty"` + + // Project: project ID of the snapshot. + // Precisely one of Project, Organization must be set. + Project *string `json:"project,omitempty"` + + // VolumeType: overrides the volume_type of the snapshot. + // If omitted, the volume type of the original volume will be used. + // Default value: unknown_volume_type + VolumeType SnapshotVolumeType `json:"volume_type,omitempty"` + + // Bucket: bucket name for snapshot imports. + Bucket *string `json:"bucket,omitempty"` + + // Key: object key for snapshot imports. + Key *string `json:"key,omitempty"` + + // Size: imported snapshot size, must be a multiple of 512. + Size *scw.Size `json:"size,omitempty"` +} + +// CreateSnapshotResponse: create snapshot response. 
+type CreateSnapshotResponse struct { + Snapshot *Snapshot `json:"snapshot"` + + Task *Task `json:"task"` +} + +// CreateVolumeRequest: create volume request. +type CreateVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: volume name. + Name string `json:"name,omitempty"` + + // Deprecated: Organization: volume Organization ID. + // Precisely one of Project, Organization must be set. + Organization *string `json:"organization,omitempty"` + + // Project: volume Project ID. + // Precisely one of Project, Organization must be set. + Project *string `json:"project,omitempty"` + + // Tags: volume tags. + Tags []string `json:"tags,omitempty"` + + // VolumeType: volume type. + // Default value: l_ssd + VolumeType VolumeVolumeType `json:"volume_type,omitempty"` + + // Size: volume disk size, must be a multiple of 512. + // Precisely one of Size, BaseSnapshot must be set. + Size *scw.Size `json:"size,omitempty"` + + // BaseSnapshot: ID of the snapshot on which this volume will be based. + // Precisely one of Size, BaseSnapshot must be set. + BaseSnapshot *string `json:"base_snapshot,omitempty"` +} + +// CreateVolumeResponse: create volume response. +type CreateVolumeResponse struct { + Volume *Volume `json:"volume"` +} + +// DeleteIPRequest: delete ip request. +type DeleteIPRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // IP: ID or address of the IP to delete. + IP string `json:"-"` +} + +// DeleteImageRequest: delete image request. +type DeleteImageRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ImageID: UUID of the image you want to delete. + ImageID string `json:"-"` +} + +// DeletePlacementGroupRequest: delete placement group request. +type DeletePlacementGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // PlacementGroupID: UUID of the placement group you want to delete. + PlacementGroupID string `json:"-"` +} + +// DeletePrivateNICRequest: delete private nic request. +type DeletePrivateNICRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: instance to which the private NIC is attached. + ServerID string `json:"-"` + + // PrivateNicID: private NIC unique ID. + PrivateNicID string `json:"-"` +} + +// DeleteSecurityGroupRequest: delete security group request. +type DeleteSecurityGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SecurityGroupID: UUID of the security group you want to delete. + SecurityGroupID string `json:"-"` +} + +// DeleteSecurityGroupRuleRequest: delete security group rule request. +type DeleteSecurityGroupRuleRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + SecurityGroupID string `json:"-"` + + SecurityGroupRuleID string `json:"-"` +} + +// DeleteServerRequest: delete server request. +type DeleteServerRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + ServerID string `json:"-"` +} + +// DeleteServerUserDataRequest: delete server user data request. 
+type DeleteServerUserDataRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: UUID of the Instance. + ServerID string `json:"-"` + + // Key: key of the user data to delete. + Key string `json:"-"` +} + +// DeleteSnapshotRequest: delete snapshot request. +type DeleteSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SnapshotID: UUID of the snapshot you want to delete. + SnapshotID string `json:"-"` +} + +// DeleteVolumeRequest: delete volume request. +type DeleteVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: UUID of the volume you want to delete. + VolumeID string `json:"-"` +} + +// DetachServerVolumeRequest: detach server volume request. +type DetachServerVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + ServerID string `json:"-"` + + VolumeID string `json:"volume_id,omitempty"` +} + +// DetachServerVolumeResponse: detach server volume response. +type DetachServerVolumeResponse struct { + Server *Server `json:"server"` +} + +// ExportSnapshotRequest: export snapshot request. +type ExportSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SnapshotID: snapshot ID. + SnapshotID string `json:"-"` + + // Bucket: s3 bucket name. + Bucket string `json:"bucket,omitempty"` + + // Key: s3 object key. + Key string `json:"key,omitempty"` +} + +// ExportSnapshotResponse: export snapshot response. type ExportSnapshotResponse struct { Task *Task `json:"task"` } +// GetBootscriptRequest: get bootscript request. +type GetBootscriptRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + BootscriptID string `json:"-"` +} + +// GetBootscriptResponse: get bootscript response. type GetBootscriptResponse struct { Bootscript *Bootscript `json:"bootscript"` } +// GetDashboardRequest: get dashboard request. +type GetDashboardRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + Organization *string `json:"-"` + + Project *string `json:"-"` +} + +// GetDashboardResponse: get dashboard response. type GetDashboardResponse struct { Dashboard *Dashboard `json:"dashboard"` } +// GetIPRequest: get ip request. +type GetIPRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // IP: IP ID or address to get. + IP string `json:"-"` +} + +// GetIPResponse: get ip response. type GetIPResponse struct { IP *IP `json:"ip"` } +// GetImageRequest: get image request. +type GetImageRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ImageID: UUID of the image you want to get. + ImageID string `json:"-"` +} + +// GetImageResponse: get image response. type GetImageResponse struct { Image *Image `json:"image"` } +// GetPlacementGroupRequest: get placement group request. +type GetPlacementGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. 
+ Zone scw.Zone `json:"-"` + + // PlacementGroupID: UUID of the placement group you want to get. + PlacementGroupID string `json:"-"` +} + +// GetPlacementGroupResponse: get placement group response. type GetPlacementGroupResponse struct { PlacementGroup *PlacementGroup `json:"placement_group"` } +// GetPlacementGroupServersRequest: get placement group servers request. +type GetPlacementGroupServersRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // PlacementGroupID: UUID of the placement group you want to get. + PlacementGroupID string `json:"-"` +} + // GetPlacementGroupServersResponse: get placement group servers response. type GetPlacementGroupServersResponse struct { // Servers: instances attached to the placement group. Servers []*PlacementGroupServer `json:"servers"` } +// GetPrivateNICRequest: get private nic request. +type GetPrivateNICRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: instance to which the private NIC is attached. + ServerID string `json:"-"` + + // PrivateNicID: private NIC unique ID. + PrivateNicID string `json:"-"` +} + +// GetPrivateNICResponse: get private nic response. type GetPrivateNICResponse struct { PrivateNic *PrivateNIC `json:"private_nic"` } +// GetSecurityGroupRequest: get security group request. +type GetSecurityGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SecurityGroupID: UUID of the security group you want to get. + SecurityGroupID string `json:"-"` +} + +// GetSecurityGroupResponse: get security group response. type GetSecurityGroupResponse struct { SecurityGroup *SecurityGroup `json:"security_group"` } +// GetSecurityGroupRuleRequest: get security group rule request. +type GetSecurityGroupRuleRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + SecurityGroupID string `json:"-"` + + SecurityGroupRuleID string `json:"-"` +} + +// GetSecurityGroupRuleResponse: get security group rule response. type GetSecurityGroupRuleResponse struct { Rule *SecurityGroupRule `json:"rule"` } +// GetServerRequest: get server request. +type GetServerRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: UUID of the Instance you want to get. + ServerID string `json:"-"` +} + +// GetServerResponse: get server response. type GetServerResponse struct { Server *Server `json:"server"` } +// GetServerTypesAvailabilityRequest: get server types availability request. +type GetServerTypesAvailabilityRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // PerPage: a positive integer lower or equal to 100 to select the number of items to return. + PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to return. + Page *int32 `json:"-"` +} + // GetServerTypesAvailabilityResponse: get server types availability response. type GetServerTypesAvailabilityResponse struct { // Servers: map of server types. 
@@ -1033,55 +2498,784 @@ type GetServerTypesAvailabilityResponse struct { TotalCount uint32 `json:"total_count"` } -type GetServerTypesAvailabilityResponseAvailability struct { - // Availability: default value: available - Availability ServerTypesAvailability `json:"availability"` +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *GetServerTypesAvailabilityResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount } +// UnsafeAppend should not be used +// Internal usage only +func (r *GetServerTypesAvailabilityResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*GetServerTypesAvailabilityResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + if r.Servers == nil { + r.Servers = make(map[string]*GetServerTypesAvailabilityResponseAvailability) + } + for k, v := range results.Servers { + r.Servers[k] = v + } + r.TotalCount += uint32(len(results.Servers)) + return uint32(len(results.Servers)), nil +} + +// GetSnapshotRequest: get snapshot request. +type GetSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SnapshotID: UUID of the snapshot you want to get. + SnapshotID string `json:"-"` +} + +// GetSnapshotResponse: get snapshot response. type GetSnapshotResponse struct { Snapshot *Snapshot `json:"snapshot"` } +// GetVolumeRequest: get volume request. +type GetVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: UUID of the volume you want to get. + VolumeID string `json:"-"` +} + +// GetVolumeResponse: get volume response. type GetVolumeResponse struct { Volume *Volume `json:"volume"` } -type IP struct { - ID string `json:"id"` +// ListBootscriptsRequest: list bootscripts request. +type ListBootscriptsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` - Address net.IP `json:"address"` + Arch *string `json:"-"` - Reverse *string `json:"reverse"` + Title *string `json:"-"` - Server *ServerSummary `json:"server"` + Default *bool `json:"-"` - Organization string `json:"organization"` + Public *bool `json:"-"` - Tags []string `json:"tags"` + PerPage *uint32 `json:"-"` - Project string `json:"project"` - // Type: default value: unknown_iptype - Type IPType `json:"type"` - // State: default value: unknown_state - State IPState `json:"state"` - - Prefix scw.IPNet `json:"prefix"` - - Zone scw.Zone `json:"zone"` + Page *int32 `json:"-"` } -type Image struct { - ID string `json:"id"` +// ListBootscriptsResponse: list bootscripts response. +type ListBootscriptsResponse struct { + // TotalCount: total number of bootscripts. + TotalCount uint32 `json:"total_count"` + + // Bootscripts: list of bootscripts. + Bootscripts []*Bootscript `json:"bootscripts"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListBootscriptsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListBootscriptsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListBootscriptsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Bootscripts = append(r.Bootscripts, results.Bootscripts...) 
+ r.TotalCount += uint32(len(results.Bootscripts)) + return uint32(len(results.Bootscripts)), nil +} + +// ListDefaultSecurityGroupRulesRequest: list default security group rules request. +type ListDefaultSecurityGroupRulesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` +} + +// ListIPsRequest: list i ps request. +type ListIPsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Project: project ID in which the IPs are reserved. + Project *string `json:"-"` + + // Organization: organization ID in which the IPs are reserved. + Organization *string `json:"-"` + + // Tags: filter IPs with these exact tags (to filter with several tags, use commas to separate them). + Tags []string `json:"-"` + + // Name: filter on the IP address (Works as a LIKE operation on the IP address). + Name *string `json:"-"` + + // PerPage: a positive integer lower or equal to 100 to select the number of items to return. + PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to return. + Page *int32 `json:"-"` + + // Type: filter on the IP Mobility IP type (whose value should be either 'nat', 'routed_ipv4' or 'routed_ipv6'). + Type *string `json:"-"` +} + +// ListIPsResponse: list i ps response. +type ListIPsResponse struct { + // TotalCount: total number of ips. + TotalCount uint32 `json:"total_count"` + + // IPs: list of ips. + IPs []*IP `json:"ips"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListIPsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListIPsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListIPsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.IPs = append(r.IPs, results.IPs...) + r.TotalCount += uint32(len(results.IPs)) + return uint32(len(results.IPs)), nil +} + +// ListImagesRequest: list images request. +type ListImagesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + Organization *string `json:"-"` + + PerPage *uint32 `json:"-"` + + Page *int32 `json:"-"` + + Name *string `json:"-"` + + Public *bool `json:"-"` + + Arch *string `json:"-"` + + Project *string `json:"-"` + + Tags *string `json:"-"` +} + +// ListImagesResponse: list images response. +type ListImagesResponse struct { + // TotalCount: total number of images. + TotalCount uint32 `json:"total_count"` + + // Images: list of images. + Images []*Image `json:"images"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListImagesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListImagesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListImagesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Images = append(r.Images, results.Images...) + r.TotalCount += uint32(len(results.Images)) + return uint32(len(results.Images)), nil +} + +// ListPlacementGroupsRequest: list placement groups request. +type ListPlacementGroupsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. 
+ Zone scw.Zone `json:"-"` + + // PerPage: a positive integer lower or equal to 100 to select the number of items to return. + PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to return. + Page *int32 `json:"-"` + + // Organization: list only placement groups of this Organization ID. + Organization *string `json:"-"` + + // Project: list only placement groups of this Project ID. + Project *string `json:"-"` + + // Tags: list placement groups with these exact tags (to filter with several tags, use commas to separate them). + Tags []string `json:"-"` + + // Name: filter placement groups by name (for eg. "cluster1" will return "cluster100" and "cluster1" but not "foo"). + Name *string `json:"-"` +} + +// ListPlacementGroupsResponse: list placement groups response. +type ListPlacementGroupsResponse struct { + // TotalCount: total number of placement groups. + TotalCount uint32 `json:"total_count"` + + // PlacementGroups: list of placement groups. + PlacementGroups []*PlacementGroup `json:"placement_groups"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListPlacementGroupsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListPlacementGroupsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListPlacementGroupsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.PlacementGroups = append(r.PlacementGroups, results.PlacementGroups...) + r.TotalCount += uint32(len(results.PlacementGroups)) + return uint32(len(results.PlacementGroups)), nil +} + +// ListPrivateNICsRequest: list private ni cs request. +type ListPrivateNICsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: instance to which the private NIC is attached. + ServerID string `json:"-"` + + // Tags: private NIC tags. + Tags []string `json:"-"` + + // PerPage: a positive integer lower or equal to 100 to select the number of items to return. + PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to return. + Page *int32 `json:"-"` +} + +// ListPrivateNICsResponse: list private ni cs response. +type ListPrivateNICsResponse struct { + PrivateNics []*PrivateNIC `json:"private_nics"` + + TotalCount uint64 `json:"total_count"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListPrivateNICsResponse) UnsafeGetTotalCount() uint64 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListPrivateNICsResponse) UnsafeAppend(res interface{}) (uint64, error) { + results, ok := res.(*ListPrivateNICsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.PrivateNics = append(r.PrivateNics, results.PrivateNics...) + r.TotalCount += uint64(len(results.PrivateNics)) + return uint64(len(results.PrivateNics)), nil +} + +// ListSecurityGroupRulesRequest: list security group rules request. +type ListSecurityGroupRulesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SecurityGroupID: UUID of the security group. + SecurityGroupID string `json:"-"` + + // PerPage: a positive integer lower or equal to 100 to select the number of items to return. 
+ PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to return. + Page *int32 `json:"-"` +} + +// ListSecurityGroupRulesResponse: list security group rules response. +type ListSecurityGroupRulesResponse struct { + // TotalCount: total number of security groups. + TotalCount uint32 `json:"total_count"` + + // Rules: list of security rules. + Rules []*SecurityGroupRule `json:"rules"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListSecurityGroupRulesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListSecurityGroupRulesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListSecurityGroupRulesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Rules = append(r.Rules, results.Rules...) + r.TotalCount += uint32(len(results.Rules)) + return uint32(len(results.Rules)), nil +} + +// ListSecurityGroupsRequest: list security groups request. +type ListSecurityGroupsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: name of the security group. + Name *string `json:"-"` + + // Organization: security group Organization ID. + Organization *string `json:"-"` + + // Project: security group Project ID. + Project *string `json:"-"` + + // Tags: list security groups with these exact tags (to filter with several tags, use commas to separate them). + Tags []string `json:"-"` + + // ProjectDefault: filter security groups with this value for project_default. + ProjectDefault *bool `json:"-"` + + // PerPage: a positive integer lower or equal to 100 to select the number of items to return. + PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to return. + Page *int32 `json:"-"` +} + +// ListSecurityGroupsResponse: list security groups response. +type ListSecurityGroupsResponse struct { + // TotalCount: total number of security groups. + TotalCount uint32 `json:"total_count"` + + // SecurityGroups: list of security groups. + SecurityGroups []*SecurityGroup `json:"security_groups"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListSecurityGroupsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListSecurityGroupsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListSecurityGroupsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.SecurityGroups = append(r.SecurityGroups, results.SecurityGroups...) + r.TotalCount += uint32(len(results.SecurityGroups)) + return uint32(len(results.SecurityGroups)), nil +} + +// ListServerActionsRequest: list server actions request. +type ListServerActionsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + ServerID string `json:"-"` +} + +// ListServerActionsResponse: list server actions response. +type ListServerActionsResponse struct { + Actions []ServerAction `json:"actions"` +} + +// ListServerUserDataRequest: list server user data request. +type ListServerUserDataRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: UUID of the Instance. 
+ ServerID string `json:"-"` +} + +// ListServerUserDataResponse: list server user data response. +type ListServerUserDataResponse struct { + UserData []string `json:"user_data"` +} + +// ListServersRequest: list servers request. +type ListServersRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // PerPage: a positive integer lower or equal to 100 to select the number of items to return. + PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to return. + Page *int32 `json:"-"` + + // Organization: list only Instances of this Organization ID. + Organization *string `json:"-"` + + // Project: list only Instances of this Project ID. + Project *string `json:"-"` + + // Name: filter Instances by name (eg. "server1" will return "server100" and "server1" but not "foo"). + Name *string `json:"-"` + + // PrivateIP: list Instances by private_ip. + PrivateIP *net.IP `json:"-"` + + // WithoutIP: list Instances that are not attached to a public IP. + WithoutIP *bool `json:"-"` + + // CommercialType: list Instances of this commercial type. + CommercialType *string `json:"-"` + + // State: list Instances in this state. + // Default value: running + State *ServerState `json:"-"` + + // Tags: list Instances with these exact tags (to filter with several tags, use commas to separate them). + Tags []string `json:"-"` + + // PrivateNetwork: list Instances in this Private Network. + PrivateNetwork *string `json:"-"` + + // Order: define the order of the returned servers. + // Default value: creation_date_desc + Order ListServersRequestOrder `json:"-"` + + // PrivateNetworks: list Instances from the given Private Networks (use commas to separate them). + PrivateNetworks []string `json:"-"` + + // PrivateNicMacAddress: list Instances associated with the given private NIC MAC address. + PrivateNicMacAddress *string `json:"-"` + + // Servers: list Instances from these server ids (use commas to separate them). + Servers []string `json:"-"` +} + +// ListServersResponse: list servers response. +type ListServersResponse struct { + // TotalCount: total number of Instances. + TotalCount uint32 `json:"total_count"` + + // Servers: list of Instances. + Servers []*Server `json:"servers"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListServersResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListServersResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListServersResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Servers = append(r.Servers, results.Servers...) + r.TotalCount += uint32(len(results.Servers)) + return uint32(len(results.Servers)), nil +} + +// ListServersTypesRequest: list servers types request. +type ListServersTypesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + PerPage *uint32 `json:"-"` + + Page *int32 `json:"-"` +} + +// ListServersTypesResponse: list servers types response. +type ListServersTypesResponse struct { + // TotalCount: total number of Instance types. + TotalCount uint32 `json:"total_count"` + + // Servers: list of Instance types. 
+ Servers map[string]*ServerType `json:"servers"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListServersTypesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListServersTypesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListServersTypesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + if r.Servers == nil { + r.Servers = make(map[string]*ServerType) + } + for k, v := range results.Servers { + r.Servers[k] = v + } + r.TotalCount += uint32(len(results.Servers)) + return uint32(len(results.Servers)), nil +} + +// ListSnapshotsRequest: list snapshots request. +type ListSnapshotsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Organization: list snapshots only for this Organization ID. + Organization *string `json:"-"` + + // Project: list snapshots only for this Project ID. + Project *string `json:"-"` + + // PerPage: number of snapshots returned per page (positive integer lower or equal to 100). + PerPage *uint32 `json:"-"` + + // Page: page to be returned. + Page *int32 `json:"-"` + + // Name: list snapshots of the requested name. + Name *string `json:"-"` + + // Tags: list snapshots that have the requested tag. + Tags *string `json:"-"` + + // BaseVolumeID: list snapshots originating only from this volume. + BaseVolumeID *string `json:"-"` +} + +// ListSnapshotsResponse: list snapshots response. +type ListSnapshotsResponse struct { + // TotalCount: total number of snapshots. + TotalCount uint32 `json:"total_count"` + + // Snapshots: list of snapshots. + Snapshots []*Snapshot `json:"snapshots"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListSnapshotsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListSnapshotsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListSnapshotsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Snapshots = append(r.Snapshots, results.Snapshots...) + r.TotalCount += uint32(len(results.Snapshots)) + return uint32(len(results.Snapshots)), nil +} + +// ListVolumesRequest: list volumes request. +type ListVolumesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeType: filter by volume type. + // Default value: l_ssd + VolumeType *VolumeVolumeType `json:"-"` + + // PerPage: a positive integer lower or equal to 100 to select the number of items to return. + PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to return. + Page *int32 `json:"-"` + + // Organization: filter volume by Organization ID. + Organization *string `json:"-"` + + // Project: filter volume by Project ID. + Project *string `json:"-"` + + // Tags: filter volumes with these exact tags (to filter with several tags, use commas to separate them). + Tags []string `json:"-"` + + // Name: filter volume by name (for eg. "vol" will return "myvolume" but not "data"). + Name *string `json:"-"` +} + +// ListVolumesResponse: list volumes response. +type ListVolumesResponse struct { + // TotalCount: total number of volumes. + TotalCount uint32 `json:"total_count"` + + // Volumes: list of volumes. 
+ Volumes []*Volume `json:"volumes"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListVolumesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListVolumesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListVolumesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Volumes = append(r.Volumes, results.Volumes...) + r.TotalCount += uint32(len(results.Volumes)) + return uint32(len(results.Volumes)), nil +} + +// ListVolumesTypesRequest: list volumes types request. +type ListVolumesTypesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + PerPage *uint32 `json:"-"` + + Page *int32 `json:"-"` +} + +// ListVolumesTypesResponse: list volumes types response. +type ListVolumesTypesResponse struct { + // TotalCount: total number of volume types. + TotalCount uint32 `json:"total_count"` + + // Volumes: map of volume types. + Volumes map[string]*VolumeType `json:"volumes"` +} + +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListVolumesTypesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListVolumesTypesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListVolumesTypesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + if r.Volumes == nil { + r.Volumes = make(map[string]*VolumeType) + } + for k, v := range results.Volumes { + r.Volumes[k] = v + } + r.TotalCount += uint32(len(results.Volumes)) + return uint32(len(results.Volumes)), nil +} + +// MigrationPlan: migration plan. +type MigrationPlan struct { + // Volume: a volume which will be migrated to SBS together with the snapshots, if present. + Volume *Volume `json:"volume"` + + // Snapshots: a list of snapshots which will be migrated to SBS together and with the volume, if present. + Snapshots []*Snapshot `json:"snapshots"` + + // ValidationKey: a value to be passed to ApplyBlockMigrationRequest, to confirm that the execution of the plan is being requested. + ValidationKey string `json:"validation_key"` +} + +// PlanBlockMigrationRequest: plan block migration request. +type PlanBlockMigrationRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: the volume for which the migration plan will be generated. + // Precisely one of VolumeID, SnapshotID must be set. + VolumeID *string `json:"volume_id,omitempty"` + + // SnapshotID: the snapshot for which the migration plan will be generated. + // Precisely one of VolumeID, SnapshotID must be set. + SnapshotID *string `json:"snapshot_id,omitempty"` +} + +// ServerActionRequest: server action request. +type ServerActionRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: UUID of the Instance. + ServerID string `json:"-"` + + // Action: action to perform on the Instance. + // Default value: poweron + Action ServerAction `json:"action,omitempty"` + + // Name: name of the backup you want to create. + // This field should only be specified when performing a backup action. 
+ Name *string `json:"name,omitempty"` + + // Volumes: for each volume UUID, the snapshot parameters of the volume. + // This field should only be specified when performing a backup action. + Volumes map[string]*ServerActionRequestVolumeBackupTemplate `json:"volumes,omitempty"` +} + +// ServerActionResponse: server action response. +type ServerActionResponse struct { + Task *Task `json:"task"` +} + +// SetImageRequest: set image request. +type SetImageRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + ID string `json:"-"` Name string `json:"name"` - // Arch: default value: x86_64 + + // Arch: default value: unknown_arch Arch Arch `json:"arch"` - CreationDate *time.Time `json:"creation_date"` + CreationDate *time.Time `json:"creation_date,omitempty"` + + ModificationDate *time.Time `json:"modification_date,omitempty"` - ModificationDate *time.Time `json:"modification_date"` // Deprecated - DefaultBootscript *Bootscript `json:"default_bootscript"` + DefaultBootscript *Bootscript `json:"default_bootscript,omitempty"` ExtraVolumes map[string]*Volume `json:"extra_volumes"` @@ -1091,769 +3285,632 @@ type Image struct { Public bool `json:"public"` - RootVolume *VolumeSummary `json:"root_volume"` + RootVolume *VolumeSummary `json:"root_volume,omitempty"` + // State: default value: available State ImageState `json:"state"` Project string `json:"project"` - Tags []string `json:"tags"` - - Zone scw.Zone `json:"zone"` + Tags *[]string `json:"tags,omitempty"` } -// ListBootscriptsResponse: list bootscripts response. -type ListBootscriptsResponse struct { - // TotalCount: total number of bootscripts. - TotalCount uint32 `json:"total_count"` - // Bootscripts: list of bootscripts. - Bootscripts []*Bootscript `json:"bootscripts"` -} +// SetPlacementGroupRequest: set placement group request. +type SetPlacementGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` -// ListIPsResponse: list ips response. -type ListIPsResponse struct { - // TotalCount: total number of ips. - TotalCount uint32 `json:"total_count"` - // IPs: list of ips. - IPs []*IP `json:"ips"` -} + PlacementGroupID string `json:"-"` -// ListImagesResponse: list images response. -type ListImagesResponse struct { - // TotalCount: total number of images. - TotalCount uint32 `json:"total_count"` - // Images: list of images. - Images []*Image `json:"images"` -} - -// ListPlacementGroupsResponse: list placement groups response. -type ListPlacementGroupsResponse struct { - // TotalCount: total number of placement groups. - TotalCount uint32 `json:"total_count"` - // PlacementGroups: list of placement groups. - PlacementGroups []*PlacementGroup `json:"placement_groups"` -} - -type ListPrivateNICsResponse struct { - PrivateNics []*PrivateNIC `json:"private_nics"` - - TotalCount uint64 `json:"total_count"` -} - -// ListSecurityGroupRulesResponse: list security group rules response. -type ListSecurityGroupRulesResponse struct { - // TotalCount: total number of security groups. - TotalCount uint32 `json:"total_count"` - // Rules: list of security rules. - Rules []*SecurityGroupRule `json:"rules"` -} - -// ListSecurityGroupsResponse: list security groups response. -type ListSecurityGroupsResponse struct { - // TotalCount: total number of security groups. - TotalCount uint32 `json:"total_count"` - // SecurityGroups: list of security groups. 
- SecurityGroups []*SecurityGroup `json:"security_groups"` -} - -type ListServerActionsResponse struct { - Actions []ServerAction `json:"actions"` -} - -type ListServerUserDataResponse struct { - UserData []string `json:"user_data"` -} - -// ListServersResponse: list servers response. -type ListServersResponse struct { - // TotalCount: total number of Instances. - TotalCount uint32 `json:"total_count"` - // Servers: list of Instances. - Servers []*Server `json:"servers"` -} - -// ListServersTypesResponse: list servers types response. -type ListServersTypesResponse struct { - // TotalCount: total number of Instance types. - TotalCount uint32 `json:"total_count"` - // Servers: list of Instance types. - Servers map[string]*ServerType `json:"servers"` -} - -// ListSnapshotsResponse: list snapshots response. -type ListSnapshotsResponse struct { - // TotalCount: total number of snapshots. - TotalCount uint32 `json:"total_count"` - // Snapshots: list of snapshots. - Snapshots []*Snapshot `json:"snapshots"` -} - -// ListVolumesResponse: list volumes response. -type ListVolumesResponse struct { - // TotalCount: total number of volumes. - TotalCount uint32 `json:"total_count"` - // Volumes: list of volumes. - Volumes []*Volume `json:"volumes"` -} - -// ListVolumesTypesResponse: list volumes types response. -type ListVolumesTypesResponse struct { - // TotalCount: total number of volume types. - TotalCount uint32 `json:"total_count"` - // Volumes: map of volume types. - Volumes map[string]*VolumeType `json:"volumes"` -} - -// MigrationPlan: migration plan. -type MigrationPlan struct { - // Volume: a volume which will be migrated to SBS together with the snapshots, if present. - Volume *Volume `json:"volume"` - // Snapshots: a list of snapshots which will be migrated to SBS together and with the volume, if present. - Snapshots []*Snapshot `json:"snapshots"` - // ValidationKey: a value to be passed to ApplyBlockMigrationRequest, to confirm that the execution of the plan is being requested. - ValidationKey string `json:"validation_key"` -} - -type NullableStringValue struct { - Null bool `json:"null,omitempty"` - - Value string `json:"value,omitempty"` -} - -// PlacementGroup: placement group. -type PlacementGroup struct { - // ID: placement group unique ID. - ID string `json:"id"` - // Name: placement group name. Name string `json:"name"` - // Organization: placement group Organization ID. + Organization string `json:"organization"` - // Project: placement group Project ID. - Project string `json:"project"` - // Tags: placement group tags. - Tags []string `json:"tags"` - // PolicyMode: select the failure mode when the placement cannot be respected, either optional or enforced. - // Default value: optional + + // PolicyMode: default value: optional PolicyMode PlacementGroupPolicyMode `json:"policy_mode"` - // PolicyType: select the behavior of the placement group, either low_latency (group) or max_availability (spread). - // Default value: max_availability + + // PolicyType: default value: max_availability PolicyType PlacementGroupPolicyType `json:"policy_type"` - // PolicyRespected: returns true if the policy is respected, false otherwise. - PolicyRespected bool `json:"policy_respected"` - // Zone: zone in which the placement group is located. - Zone scw.Zone `json:"zone"` -} -// PlacementGroupServer: placement group server. -type PlacementGroupServer struct { - // ID: instance UUID. - ID string `json:"id"` - // Name: instance name. 
- Name string `json:"name"` - // PolicyRespected: defines whether the placement group policy is respected (either 1 or 0). - PolicyRespected bool `json:"policy_respected"` -} - -// PrivateNIC: private nic. -type PrivateNIC struct { - // ID: private NIC unique ID. - ID string `json:"id,omitempty"` - // ServerID: instance to which the private NIC is attached. - ServerID string `json:"server_id,omitempty"` - // PrivateNetworkID: private Network the private NIC is attached to. - PrivateNetworkID string `json:"private_network_id,omitempty"` - // MacAddress: private NIC MAC address. - MacAddress string `json:"mac_address,omitempty"` - // State: private NIC state. - // Default value: available - State PrivateNICState `json:"state,omitempty"` - // Tags: private NIC tags. - Tags []string `json:"tags,omitempty"` -} - -// SecurityGroup: security group. -type SecurityGroup struct { - // ID: security group unique ID. - ID string `json:"id"` - // Name: security group name. - Name string `json:"name"` - // Description: security group description. - Description string `json:"description"` - // EnableDefaultSecurity: true if SMTP is blocked on IPv4 and IPv6. This feature is read only, please open a support ticket if you need to make it configurable. - EnableDefaultSecurity bool `json:"enable_default_security"` - // InboundDefaultPolicy: default inbound policy. - // Default value: accept - InboundDefaultPolicy SecurityGroupPolicy `json:"inbound_default_policy"` - // OutboundDefaultPolicy: default outbound policy. - // Default value: accept - OutboundDefaultPolicy SecurityGroupPolicy `json:"outbound_default_policy"` - // Organization: security group Organization ID. - Organization string `json:"organization"` - // Project: security group Project ID. Project string `json:"project"` - // Tags: security group tags. - Tags []string `json:"tags"` - // Deprecated: OrganizationDefault: true if it is your default security group for this Organization ID. - OrganizationDefault *bool `json:"organization_default,omitempty"` - // ProjectDefault: true if it is your default security group for this Project ID. - ProjectDefault bool `json:"project_default"` - // CreationDate: security group creation date. - CreationDate *time.Time `json:"creation_date"` - // ModificationDate: security group modification date. - ModificationDate *time.Time `json:"modification_date"` - // Servers: list of Instances attached to this security group. - Servers []*ServerSummary `json:"servers"` - // Stateful: defines whether the security group is stateful. - Stateful bool `json:"stateful"` - // State: security group state. - // Default value: available - State SecurityGroupState `json:"state"` - // Zone: zone in which the security group is located. 
- Zone scw.Zone `json:"zone"` -} - -type SecurityGroupRule struct { - ID string `json:"id"` - // Protocol: default value: TCP - Protocol SecurityGroupRuleProtocol `json:"protocol"` - // Direction: default value: inbound - Direction SecurityGroupRuleDirection `json:"direction"` - // Action: default value: accept - Action SecurityGroupRuleAction `json:"action"` - - IPRange scw.IPNet `json:"ip_range"` - - DestPortFrom *uint32 `json:"dest_port_from"` - - DestPortTo *uint32 `json:"dest_port_to"` - - Position uint32 `json:"position"` - - Editable bool `json:"editable"` - - Zone scw.Zone `json:"zone"` -} - -type SecurityGroupSummary struct { - ID string `json:"id"` - - Name string `json:"name"` -} - -type SecurityGroupTemplate struct { - ID string `json:"id,omitempty"` - - Name string `json:"name,omitempty"` -} - -// Server: server. -type Server struct { - // ID: instance unique ID. - ID string `json:"id"` - // Name: instance name. - Name string `json:"name"` - // Organization: instance Organization ID. - Organization string `json:"organization"` - // Project: instance Project ID. - Project string `json:"project"` - // AllowedActions: list of allowed actions on the Instance. - AllowedActions []ServerAction `json:"allowed_actions"` - // Tags: tags associated with the Instance. - Tags []string `json:"tags"` - // CommercialType: instance commercial type (eg. GP1-M). - CommercialType string `json:"commercial_type"` - // CreationDate: instance creation date. - CreationDate *time.Time `json:"creation_date"` - // DynamicIPRequired: true if a dynamic IPv4 is required. - DynamicIPRequired bool `json:"dynamic_ip_required"` - // RoutedIPEnabled: true to configure the instance so it uses the new routed IP mode. - RoutedIPEnabled bool `json:"routed_ip_enabled"` - // EnableIPv6: true if IPv6 is enabled. - EnableIPv6 bool `json:"enable_ipv6"` - // Hostname: instance host name. - Hostname string `json:"hostname"` - // Image: information about the Instance image. - Image *Image `json:"image"` - // Protected: defines whether the Instance protection option is activated. - Protected bool `json:"protected"` - // PrivateIP: private IP address of the Instance. - PrivateIP *string `json:"private_ip"` - // PublicIP: information about the public IP. - PublicIP *ServerIP `json:"public_ip"` - // PublicIPs: information about all the public IPs attached to the server. - PublicIPs []*ServerIP `json:"public_ips"` - // MacAddress: the server's MAC address. - MacAddress string `json:"mac_address"` - // ModificationDate: instance modification date. - ModificationDate *time.Time `json:"modification_date"` - // State: instance state. - // Default value: running - State ServerState `json:"state"` - // Location: instance location. - Location *ServerLocation `json:"location"` - // IPv6: instance IPv6 address. - IPv6 *ServerIPv6 `json:"ipv6"` - // Deprecated: Bootscript: instance bootscript. - Bootscript *Bootscript `json:"bootscript,omitempty"` - // BootType: instance boot type. - // Default value: local - BootType BootType `json:"boot_type"` - // Volumes: instance volumes. - Volumes map[string]*VolumeServer `json:"volumes"` - // SecurityGroup: instance security group. - SecurityGroup *SecurityGroupSummary `json:"security_group"` - // Maintenances: instance planned maintenance. - Maintenances []*ServerMaintenance `json:"maintenances"` - // StateDetail: detailed information about the Instance state. - StateDetail string `json:"state_detail"` - // Arch: instance architecture. 
- // Default value: x86_64 - Arch Arch `json:"arch"` - // PlacementGroup: instance placement group. - PlacementGroup *PlacementGroup `json:"placement_group"` - // PrivateNics: instance private NICs. - PrivateNics []*PrivateNIC `json:"private_nics"` - // Zone: zone in which the Instance is located. - Zone scw.Zone `json:"zone"` -} - -// ServerActionRequestVolumeBackupTemplate: server action request. volume backup template. -type ServerActionRequestVolumeBackupTemplate struct { - // VolumeType: snapshot's volume type. - // Overrides the `volume_type` of the snapshot for this volume. - // If omitted, the volume type of the original volume will be used. - // Default value: unknown_volume_type - VolumeType SnapshotVolumeType `json:"volume_type,omitempty"` -} - -type ServerActionResponse struct { - Task *Task `json:"task"` -} - -// ServerIP: server. ip. -type ServerIP struct { - // ID: unique ID of the IP address. - ID string `json:"id,omitempty"` - // Address: instance's public IP-Address. - Address net.IP `json:"address,omitempty"` - // Gateway: gateway's IP address. - Gateway net.IP `json:"gateway,omitempty"` - // Netmask: cIDR netmask. - Netmask string `json:"netmask,omitempty"` - // Family: IP address family (inet or inet6). - // Default value: inet - Family ServerIPIPFamily `json:"family,omitempty"` - // Dynamic: true if the IP address is dynamic. - Dynamic bool `json:"dynamic,omitempty"` - // ProvisioningMode: information about this address provisioning mode. - // Default value: manual - ProvisioningMode ServerIPProvisioningMode `json:"provisioning_mode,omitempty"` -} - -// ServerIPv6: server. ipv6. -type ServerIPv6 struct { - // Address: instance IPv6 IP-Address. - Address net.IP `json:"address"` - // Gateway: iPv6 IP-addresses gateway. - Gateway net.IP `json:"gateway"` - // Netmask: iPv6 IP-addresses CIDR netmask. - Netmask string `json:"netmask"` -} - -type ServerLocation struct { - ClusterID string `json:"cluster_id"` - - HypervisorID string `json:"hypervisor_id"` - - NodeID string `json:"node_id"` - - PlatformID string `json:"platform_id"` - - ZoneID string `json:"zone_id"` -} - -type ServerMaintenance struct { - Reason string `json:"reason"` -} - -type ServerSummary struct { - ID string `json:"id"` - - Name string `json:"name"` -} - -// ServerType: server type. -type ServerType struct { - // Deprecated: MonthlyPrice: estimated monthly price, for a 30 days month, in Euro. - MonthlyPrice *float32 `json:"monthly_price,omitempty"` - // HourlyPrice: hourly price in Euro. - HourlyPrice float32 `json:"hourly_price"` - // AltNames: alternative Instance name, if any. - AltNames []string `json:"alt_names"` - // PerVolumeConstraint: additional volume constraints. - PerVolumeConstraint *ServerTypeVolumeConstraintsByType `json:"per_volume_constraint"` - // VolumesConstraint: initial volume constraints. - VolumesConstraint *ServerTypeVolumeConstraintSizes `json:"volumes_constraint"` - // Ncpus: number of CPU. - Ncpus uint32 `json:"ncpus"` - // Gpu: number of GPU. - Gpu *uint64 `json:"gpu"` - // RAM: available RAM in bytes. - RAM uint64 `json:"ram"` - // Arch: CPU architecture. - // Default value: x86_64 - Arch Arch `json:"arch"` - // Baremetal: true if it is a baremetal Instance. - Baremetal bool `json:"baremetal"` - // Network: network available for the Instance. - Network *ServerTypeNetwork `json:"network"` - // Capabilities: capabilities. - Capabilities *ServerTypeCapabilities `json:"capabilities"` - // ScratchStorageMaxSize: maximum available scratch storage. 
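// Illustrative sketch, not part of the vendored diff: the Server, ServerIP and
// ServerType structs above are the read model the API returns; this reads a few
// of those fields back via the GetServer method that appears later in this file.
// Assumes an *instance.API (see the client-construction sketch further down) and
// a placeholder server UUID. Imports assumed: "fmt",
// "github.com/scaleway/scaleway-sdk-go/api/instance/v1".
func describeServer(api *instance.API) error {
	resp, err := api.GetServer(&instance.GetServerRequest{
		// Zone is left empty so the client's default zone is used.
		ServerID: "11111111-1111-1111-1111-111111111111", // placeholder UUID
	})
	if err != nil {
		return err
	}
	srv := resp.Server
	fmt.Println(srv.Name, srv.CommercialType, srv.State)
	if srv.PublicIP != nil {
		fmt.Println("public IP:", srv.PublicIP.Address)
	}
	for key, vol := range srv.Volumes {
		fmt.Println("volume", key, "size:", vol.Size)
	}
	return nil
}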
- ScratchStorageMaxSize *scw.Size `json:"scratch_storage_max_size"` -} - -// ServerTypeCapabilities: server type. capabilities. -type ServerTypeCapabilities struct { - // BlockStorage: defines whether the Instance supports block storage. - BlockStorage *bool `json:"block_storage"` - // BootTypes: list of supported boot types. - BootTypes []BootType `json:"boot_types"` -} - -// ServerTypeNetwork: server type. network. -type ServerTypeNetwork struct { - // Interfaces: list of available network interfaces. - Interfaces []*ServerTypeNetworkInterface `json:"interfaces"` - // SumInternalBandwidth: total maximum internal bandwidth in bits per seconds. - SumInternalBandwidth *uint64 `json:"sum_internal_bandwidth"` - // SumInternetBandwidth: total maximum internet bandwidth in bits per seconds. - SumInternetBandwidth *uint64 `json:"sum_internet_bandwidth"` - // IPv6Support: true if IPv6 is enabled. - IPv6Support bool `json:"ipv6_support"` -} - -// ServerTypeNetworkInterface: server type. network. interface. -type ServerTypeNetworkInterface struct { - // InternalBandwidth: maximum internal bandwidth in bits per seconds. - InternalBandwidth *uint64 `json:"internal_bandwidth"` - // InternetBandwidth: maximum internet bandwidth in bits per seconds. - InternetBandwidth *uint64 `json:"internet_bandwidth"` -} - -// ServerTypeVolumeConstraintSizes: server type. volume constraint sizes. -type ServerTypeVolumeConstraintSizes struct { - // MinSize: minimum volume size in bytes. - MinSize scw.Size `json:"min_size"` - // MaxSize: maximum volume size in bytes. - MaxSize scw.Size `json:"max_size"` -} - -// ServerTypeVolumeConstraintsByType: server type. volume constraints by type. -type ServerTypeVolumeConstraintsByType struct { - // LSSD: local SSD volumes. - LSSD *ServerTypeVolumeConstraintSizes `json:"l_ssd"` + + Tags *[]string `json:"tags,omitempty"` } +// SetPlacementGroupResponse: set placement group response. type SetPlacementGroupResponse struct { PlacementGroup *PlacementGroup `json:"placement_group"` } +// SetPlacementGroupServersRequest: set placement group servers request. +type SetPlacementGroupServersRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // PlacementGroupID: UUID of the placement group you want to set. + PlacementGroupID string `json:"-"` + + // Servers: an array of the Instances' UUIDs you want to configure. + Servers []string `json:"servers"` +} + // SetPlacementGroupServersResponse: set placement group servers response. type SetPlacementGroupServersResponse struct { // Servers: instances attached to the placement group. Servers []*PlacementGroupServer `json:"servers"` } -// SetSecurityGroupRulesRequestRule: set security group rules request. rule. -type SetSecurityGroupRulesRequestRule struct { - // ID: UUID of the security rule to update. If no value is provided, a new rule will be created. - ID *string `json:"id"` - // Action: action to apply when the rule matches a packet. - // Default value: accept - Action SecurityGroupRuleAction `json:"action"` - // Protocol: protocol family this rule applies to. - // Default value: TCP - Protocol SecurityGroupRuleProtocol `json:"protocol"` - // Direction: direction the rule applies to. - // Default value: inbound - Direction SecurityGroupRuleDirection `json:"direction"` - // IPRange: range of IP addresses these rules apply to. - IPRange scw.IPNet `json:"ip_range"` - // DestPortFrom: beginning of the range of ports this rule applies to (inclusive). 
This value will be set to null if protocol is ICMP or ANY. - DestPortFrom *uint32 `json:"dest_port_from"` - // DestPortTo: end of the range of ports this rule applies to (inclusive). This value will be set to null if protocol is ICMP or ANY, or if it is equal to dest_port_from. - DestPortTo *uint32 `json:"dest_port_to"` - // Position: position of this rule in the security group rules list. If several rules are passed with the same position, the resulting order is undefined. - Position uint32 `json:"position"` - // Editable: indicates if this rule is editable. Rules with the value false will be ignored. - Editable *bool `json:"editable"` - // Zone: zone of the rule. This field is ignored. - Zone *scw.Zone `json:"zone"` +// SetSecurityGroupRulesRequest: set security group rules request. +type SetSecurityGroupRulesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SecurityGroupID: UUID of the security group to update the rules on. + SecurityGroupID string `json:"-"` + + // Rules: list of rules to update in the security group. + Rules []*SetSecurityGroupRulesRequestRule `json:"rules"` } +// SetSecurityGroupRulesResponse: set security group rules response. type SetSecurityGroupRulesResponse struct { Rules []*SecurityGroupRule `json:"rules"` } -// Snapshot: snapshot. -type Snapshot struct { - // ID: snapshot ID. - ID string `json:"id"` - // Name: snapshot name. - Name string `json:"name"` - // Organization: snapshot Organization ID. - Organization string `json:"organization"` - // Project: snapshot Project ID. - Project string `json:"project"` - // Tags: snapshot tags. - Tags []string `json:"tags"` - // VolumeType: snapshot volume type. - // Default value: l_ssd - VolumeType VolumeVolumeType `json:"volume_type"` - // Size: snapshot size. - Size scw.Size `json:"size"` - // State: snapshot state. - // Default value: available - State SnapshotState `json:"state"` - // BaseVolume: volume on which the snapshot is based on. - BaseVolume *SnapshotBaseVolume `json:"base_volume"` - // CreationDate: snapshot creation date. - CreationDate *time.Time `json:"creation_date"` - // ModificationDate: snapshot modification date. - ModificationDate *time.Time `json:"modification_date"` - // Zone: snapshot zone. - Zone scw.Zone `json:"zone"` - // ErrorReason: reason for the failed snapshot import. - ErrorReason *string `json:"error_reason"` -} - -// SnapshotBaseVolume: snapshot. base volume. -type SnapshotBaseVolume struct { - // ID: volume ID on which the snapshot is based. - ID string `json:"id"` - // Name: volume name on which the snapshot is based on. - Name string `json:"name"` -} - -// Task: task. -type Task struct { - // ID: unique ID of the task. - ID string `json:"id"` - // Description: description of the task. - Description string `json:"description"` - // Progress: progress of the task in percent. - Progress int32 `json:"progress"` - // StartedAt: task start date. - StartedAt *time.Time `json:"started_at"` - // TerminatedAt: task end date. - TerminatedAt *time.Time `json:"terminated_at"` - // Status: task status. - // Default value: pending - Status TaskStatus `json:"status"` - - HrefFrom string `json:"href_from"` - - HrefResult string `json:"href_result"` - // Zone: zone in which the task is excecuted. - Zone scw.Zone `json:"zone"` +// UpdateIPRequest: update ip request. +type UpdateIPRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. 
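// Illustrative sketch, not part of the vendored diff: replacing all rules of a
// security group with the SetSecurityGroupRulesRequest defined above. Assumes
// the generated SetSecurityGroupRules method accompanies these request/response
// types; the security group ID is a placeholder. Imports assumed: "net",
// "github.com/scaleway/scaleway-sdk-go/api/instance/v1",
// "github.com/scaleway/scaleway-sdk-go/scw".
func allowHTTPSOnly(api *instance.API, securityGroupID string) error {
	_, anyIPv4, err := net.ParseCIDR("0.0.0.0/0")
	if err != nil {
		return err
	}
	_, err = api.SetSecurityGroupRules(&instance.SetSecurityGroupRulesRequest{
		SecurityGroupID: securityGroupID,
		Rules: []*instance.SetSecurityGroupRulesRequestRule{
			{
				Action:       instance.SecurityGroupRuleActionAccept,
				Protocol:     instance.SecurityGroupRuleProtocolTCP,
				Direction:    instance.SecurityGroupRuleDirectionInbound,
				IPRange:      scw.IPNet{IPNet: *anyIPv4},
				DestPortFrom: scw.Uint32Ptr(443),
				DestPortTo:   scw.Uint32Ptr(443),
				Position:     1,
			},
		},
	})
	return err
}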
+ Zone scw.Zone `json:"-"` + + // IP: IP ID or IP address. + IP string `json:"-"` + + // Reverse: reverse domain name. + Reverse *NullableStringValue `json:"reverse,omitempty"` + + // Type: convert a 'nat' IP to a 'routed_ipv4'. + // Default value: unknown_iptype + Type IPType `json:"type,omitempty"` + + // Tags: an array of keywords you want to tag this IP with. + Tags *[]string `json:"tags,omitempty"` + + Server *NullableStringValue `json:"server,omitempty"` } +// UpdateIPResponse: update ip response. type UpdateIPResponse struct { IP *IP `json:"ip"` } +// UpdateImageRequest: update image request. +type UpdateImageRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ImageID: UUID of the image. + ImageID string `json:"-"` + + // Name: name of the image. + Name *string `json:"name,omitempty"` + + // Arch: architecture of the image. + // Default value: unknown_arch + Arch Arch `json:"arch,omitempty"` + + // ExtraVolumes: additional snapshots of the image, with extra_volumeKey being the position of the snapshot in the image. + ExtraVolumes map[string]*VolumeImageUpdateTemplate `json:"extra_volumes,omitempty"` + + // Tags: tags of the image. + Tags *[]string `json:"tags,omitempty"` + + // Public: true to set the image as public. + Public *bool `json:"public,omitempty"` +} + +// UpdateImageResponse: update image response. +type UpdateImageResponse struct { + Image *Image `json:"image"` +} + +// UpdatePlacementGroupRequest: update placement group request. +type UpdatePlacementGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // PlacementGroupID: UUID of the placement group. + PlacementGroupID string `json:"-"` + + // Name: name of the placement group. + Name *string `json:"name,omitempty"` + + // Tags: tags of the placement group. + Tags *[]string `json:"tags,omitempty"` + + // PolicyMode: operating mode of the placement group. + // Default value: optional + PolicyMode *PlacementGroupPolicyMode `json:"policy_mode,omitempty"` + + // PolicyType: policy type of the placement group. + // Default value: max_availability + PolicyType *PlacementGroupPolicyType `json:"policy_type,omitempty"` +} + +// UpdatePlacementGroupResponse: update placement group response. type UpdatePlacementGroupResponse struct { PlacementGroup *PlacementGroup `json:"placement_group"` } +// UpdatePlacementGroupServersRequest: update placement group servers request. +type UpdatePlacementGroupServersRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // PlacementGroupID: UUID of the placement group you want to update. + PlacementGroupID string `json:"-"` + + // Servers: an array of the Instances' UUIDs you want to configure. + Servers []string `json:"servers,omitempty"` +} + // UpdatePlacementGroupServersResponse: update placement group servers response. type UpdatePlacementGroupServersResponse struct { // Servers: instances attached to the placement group. Servers []*PlacementGroupServer `json:"servers"` } +// UpdatePrivateNICRequest: update private nic request. +type UpdatePrivateNICRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: UUID of the Instance the private NIC will be attached to. + ServerID string `json:"-"` + + // PrivateNicID: private NIC unique ID. 
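// Illustrative sketch, not part of the vendored diff: setting the reverse DNS
// and tags of a flexible IP with the UpdateIPRequest defined above. Assumes the
// generated UpdateIP method accompanies the request/response types and that
// NullableStringValue carries its payload in a Value field; the IP ID is a
// placeholder.
func setReverseDNS(api *instance.API, ipID string) error {
	tags := []string{"prod", "web"}
	_, err := api.UpdateIP(&instance.UpdateIPRequest{
		IP:      ipID, // IP ID or address, per the field comment above
		Reverse: &instance.NullableStringValue{Value: "web-1.example.com"},
		Tags:    &tags,
	})
	return err
}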
+ PrivateNicID string `json:"-"` + + // Tags: tags used to select private NIC/s. + Tags *[]string `json:"tags,omitempty"` +} + +// UpdateSecurityGroupRequest: update security group request. +type UpdateSecurityGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SecurityGroupID: UUID of the security group. + SecurityGroupID string `json:"-"` + + // Name: name of the security group. + Name *string `json:"name,omitempty"` + + // Description: description of the security group. + Description *string `json:"description,omitempty"` + + // EnableDefaultSecurity: true to block SMTP on IPv4 and IPv6. This feature is read only, please open a support ticket if you need to make it configurable. + EnableDefaultSecurity *bool `json:"enable_default_security,omitempty"` + + // InboundDefaultPolicy: default inbound policy. + // Default value: unknown_policy + InboundDefaultPolicy SecurityGroupPolicy `json:"inbound_default_policy,omitempty"` + + // Tags: tags of the security group. + Tags *[]string `json:"tags,omitempty"` + + // Deprecated: OrganizationDefault: please use project_default instead. + OrganizationDefault *bool `json:"organization_default,omitempty"` + + // ProjectDefault: true use this security group for future Instances created in this project. + ProjectDefault *bool `json:"project_default,omitempty"` + + // OutboundDefaultPolicy: default outbound policy. + // Default value: unknown_policy + OutboundDefaultPolicy SecurityGroupPolicy `json:"outbound_default_policy,omitempty"` + + // Stateful: true to set the security group as stateful. + Stateful *bool `json:"stateful,omitempty"` +} + +// UpdateSecurityGroupResponse: update security group response. +type UpdateSecurityGroupResponse struct { + SecurityGroup *SecurityGroup `json:"security_group"` +} + +// UpdateSecurityGroupRuleRequest: update security group rule request. +type UpdateSecurityGroupRuleRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SecurityGroupID: UUID of the security group. + SecurityGroupID string `json:"-"` + + // SecurityGroupRuleID: UUID of the rule. + SecurityGroupRuleID string `json:"-"` + + // Protocol: protocol family this rule applies to. + // Default value: unknown_protocol + Protocol SecurityGroupRuleProtocol `json:"protocol,omitempty"` + + // Direction: direction the rule applies to. + // Default value: unknown_direction + Direction SecurityGroupRuleDirection `json:"direction,omitempty"` + + // Action: action to apply when the rule matches a packet. + // Default value: unknown_action + Action SecurityGroupRuleAction `json:"action,omitempty"` + + // IPRange: range of IP addresses these rules apply to. + IPRange *scw.IPNet `json:"ip_range,omitempty"` + + // DestPortFrom: beginning of the range of ports this rule applies to (inclusive). If 0 is provided, unset the parameter. + DestPortFrom *uint32 `json:"dest_port_from,omitempty"` + + // DestPortTo: end of the range of ports this rule applies to (inclusive). If 0 is provided, unset the parameter. + DestPortTo *uint32 `json:"dest_port_to,omitempty"` + + // Position: position of this rule in the security group rules list. + Position *uint32 `json:"position,omitempty"` +} + +// UpdateSecurityGroupRuleResponse: update security group rule response. +type UpdateSecurityGroupRuleResponse struct { + Rule *SecurityGroupRule `json:"rule"` +} + +// UpdateServerRequest: update server request. 
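// Illustrative sketch, not part of the vendored diff: tightening a security
// group's default inbound policy and enabling stateful mode via the
// UpdateSecurityGroupRequest defined above. Assumes the generated
// UpdateSecurityGroup method accompanies the request type; the security group
// ID is a placeholder.
func dropInboundByDefault(api *instance.API, securityGroupID string) error {
	_, err := api.UpdateSecurityGroup(&instance.UpdateSecurityGroupRequest{
		SecurityGroupID:      securityGroupID,
		InboundDefaultPolicy: instance.SecurityGroupPolicyDrop,
		Stateful:             scw.BoolPtr(true),
	})
	return err
}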
+type UpdateServerRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ServerID: UUID of the Instance. + ServerID string `json:"-"` + + // Name: name of the Instance. + Name *string `json:"name,omitempty"` + + // BootType: default value: local + BootType *BootType `json:"boot_type,omitempty"` + + // Tags: tags of the Instance. + Tags *[]string `json:"tags,omitempty"` + + Volumes *map[string]*VolumeServerTemplate `json:"volumes,omitempty"` + + // Deprecated + Bootscript *string `json:"bootscript,omitempty"` + + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + + // RoutedIPEnabled: true to configure the instance so it uses the new routed IP mode (once this is set to True you cannot set it back to False). + RoutedIPEnabled *bool `json:"routed_ip_enabled,omitempty"` + + // PublicIPs: a list of reserved IP IDs to attach to the Instance. + PublicIPs *[]string `json:"public_ips,omitempty"` + + EnableIPv6 *bool `json:"enable_ipv6,omitempty"` + + Protected *bool `json:"protected,omitempty"` + + SecurityGroup *SecurityGroupTemplate `json:"security_group,omitempty"` + + // PlacementGroup: placement group ID if Instance must be part of a placement group. + PlacementGroup *NullableStringValue `json:"placement_group,omitempty"` + + // PrivateNics: instance private NICs. + PrivateNics *[]string `json:"private_nics,omitempty"` + + // CommercialType: warning: This field has some restrictions: + // - Cannot be changed if the Instance is not in `stopped` state. + // - Cannot be changed if the Instance is in a placement group. + // - Local storage requirements of the target commercial_types must be fulfilled (i.e. if an Instance has 80GB of local storage, it can be changed into a GP1-XS, which has a maximum of 150GB, but it cannot be changed into a DEV1-S, which has only 20GB). + CommercialType *string `json:"commercial_type,omitempty"` +} + +// UpdateServerResponse: update server response. type UpdateServerResponse struct { Server *Server `json:"server"` } +// UpdateSnapshotRequest: update snapshot request. +type UpdateSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SnapshotID: UUID of the snapshot. + SnapshotID string `json:"-"` + + // Name: name of the snapshot. + Name *string `json:"name,omitempty"` + + // Tags: tags of the snapshot. + Tags *[]string `json:"tags,omitempty"` +} + +// UpdateSnapshotResponse: update snapshot response. +type UpdateSnapshotResponse struct { + Snapshot *Snapshot `json:"snapshot"` +} + +// UpdateVolumeRequest: update volume request. +type UpdateVolumeRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // VolumeID: UUID of the volume. + VolumeID string `json:"-"` + + // Name: volume name. + Name *string `json:"name,omitempty"` + + // Tags: tags of the volume. + Tags *[]string `json:"tags,omitempty"` + + // Size: volume disk size, must be a multiple of 512. + Size *scw.Size `json:"size,omitempty"` +} + +// UpdateVolumeResponse: update volume response. type UpdateVolumeResponse struct { Volume *Volume `json:"volume"` } -// Volume: volume. -type Volume struct { - // ID: volume unique ID. - ID string `json:"id"` - // Name: volume name. - Name string `json:"name"` - // Deprecated: ExportURI: show the volume NBD export URI. - ExportURI *string `json:"export_uri"` - // Size: volume disk size. 
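// Illustrative sketch, not part of the vendored diff: growing a volume with the
// UpdateVolumeRequest defined above (the new size must remain a multiple of
// 512). Assumes the generated UpdateVolume method accompanies the request type;
// the volume ID is a placeholder.
func growVolume(api *instance.API, volumeID string) error {
	newSize := 100 * scw.GB // 1 GB (1e9 bytes) is itself a multiple of 512
	_, err := api.UpdateVolume(&instance.UpdateVolumeRequest{
		VolumeID: volumeID,
		Size:     &newSize,
	})
	return err
}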
- Size scw.Size `json:"size"` - // VolumeType: volume type. - // Default value: l_ssd - VolumeType VolumeVolumeType `json:"volume_type"` - // CreationDate: volume creation date. - CreationDate *time.Time `json:"creation_date"` - // ModificationDate: volume modification date. - ModificationDate *time.Time `json:"modification_date"` - // Organization: volume Organization ID. - Organization string `json:"organization"` - // Project: volume Project ID. - Project string `json:"project"` - // Tags: volume tags. - Tags []string `json:"tags"` - // Server: instance attached to the volume. - Server *ServerSummary `json:"server"` - // State: volume state. - // Default value: available - State VolumeState `json:"state"` - // Zone: zone in which the volume is located. - Zone scw.Zone `json:"zone"` -} - -type VolumeServer struct { - ID string `json:"id"` - - Name string `json:"name"` - - ExportURI string `json:"export_uri"` - - Organization string `json:"organization"` - - Server *ServerSummary `json:"server"` - - Size scw.Size `json:"size"` - // VolumeType: default value: l_ssd - VolumeType VolumeServerVolumeType `json:"volume_type"` - - CreationDate *time.Time `json:"creation_date"` - - ModificationDate *time.Time `json:"modification_date"` - // State: default value: available - State VolumeServerState `json:"state"` - - Project string `json:"project"` - - Boot bool `json:"boot"` - - Zone scw.Zone `json:"zone"` -} - -// VolumeServerTemplate: volume server template. -type VolumeServerTemplate struct { - // ID: UUID of the volume. - ID *string `json:"id,omitempty"` - // Boot: force the Instance to boot on this volume. - // Default value: false - Boot *bool `json:"boot,omitempty"` - // Name: name of the volume. - Name *string `json:"name,omitempty"` - // Size: disk size of the volume, must be a multiple of 512. - Size *scw.Size `json:"size,omitempty"` - // VolumeType: type of the volume. - // Default value: l_ssd - VolumeType VolumeVolumeType `json:"volume_type,omitempty"` - // BaseSnapshot: ID of the snapshot on which this volume will be based. - BaseSnapshot *string `json:"base_snapshot,omitempty"` - // Organization: organization ID of the volume. - Organization *string `json:"organization,omitempty"` - // Project: project ID of the volume. - Project *string `json:"project,omitempty"` -} - -type VolumeSummary struct { - ID string `json:"id"` - - Name string `json:"name"` - - Size scw.Size `json:"size"` - // VolumeType: default value: l_ssd - VolumeType VolumeVolumeType `json:"volume_type"` -} - -// VolumeTemplate: volume template. -type VolumeTemplate struct { - // ID: UUID of the volume. - ID string `json:"id,omitempty"` - // Name: name of the volume. - Name string `json:"name,omitempty"` - // Size: disk size of the volume, must be a multiple of 512. - Size scw.Size `json:"size,omitempty"` - // VolumeType: type of the volume. - // Default value: l_ssd - VolumeType VolumeVolumeType `json:"volume_type,omitempty"` - // Deprecated: Organization: organization ID of the volume. - // Precisely one of Organization, Project must be set. - Organization *string `json:"organization,omitempty"` - // Project: project ID of the volume. - // Precisely one of Organization, Project must be set. 
- Project *string `json:"project,omitempty"` -} - -type VolumeType struct { - DisplayName string `json:"display_name"` - - Capabilities *VolumeTypeCapabilities `json:"capabilities"` - - Constraints *VolumeTypeConstraints `json:"constraints"` -} - -type VolumeTypeCapabilities struct { - Snapshot bool `json:"snapshot"` -} - -type VolumeTypeConstraints struct { - Min scw.Size `json:"min"` - - Max scw.Size `json:"max"` -} - // setImageResponse: set image response. type setImageResponse struct { Image *Image `json:"image"` } +// setSecurityGroupRequest: set security group request. +type setSecurityGroupRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ID: UUID of the security group. + ID string `json:"-"` + + // Name: name of the security group. + Name string `json:"name"` + + // Tags: tags of the security group. + Tags *[]string `json:"tags,omitempty"` + + // CreationDate: creation date of the security group (will be ignored). + CreationDate *time.Time `json:"creation_date,omitempty"` + + // ModificationDate: modification date of the security group (will be ignored). + ModificationDate *time.Time `json:"modification_date,omitempty"` + + // Description: description of the security group. + Description string `json:"description"` + + // EnableDefaultSecurity: true to block SMTP on IPv4 and IPv6. This feature is read only, please open a support ticket if you need to make it configurable. + EnableDefaultSecurity bool `json:"enable_default_security"` + + // InboundDefaultPolicy: default inbound policy. + // Default value: unknown_policy + InboundDefaultPolicy SecurityGroupPolicy `json:"inbound_default_policy"` + + // OutboundDefaultPolicy: default outbound policy. + // Default value: unknown_policy + OutboundDefaultPolicy SecurityGroupPolicy `json:"outbound_default_policy"` + + // Organization: security groups Organization ID. + Organization string `json:"organization"` + + // Project: security group Project ID. + Project string `json:"project"` + + // Deprecated: OrganizationDefault: please use project_default instead. + OrganizationDefault *bool `json:"organization_default,omitempty"` + + // ProjectDefault: true use this security group for future Instances created in this project. + ProjectDefault bool `json:"project_default"` + + // Servers: instances attached to this security group. + Servers []*ServerSummary `json:"servers"` + + // Stateful: true to set the security group as stateful. + Stateful bool `json:"stateful"` +} + // setSecurityGroupResponse: set security group response. type setSecurityGroupResponse struct { SecurityGroup *SecurityGroup `json:"security_group"` } +// setSecurityGroupRuleRequest: set security group rule request. +type setSecurityGroupRuleRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. 
+ Zone scw.Zone `json:"-"` + + SecurityGroupID string `json:"-"` + + SecurityGroupRuleID string `json:"-"` + + ID string `json:"id"` + + // Protocol: default value: unknown_protocol + Protocol SecurityGroupRuleProtocol `json:"protocol"` + + // Direction: default value: unknown_direction + Direction SecurityGroupRuleDirection `json:"direction"` + + // Action: default value: unknown_action + Action SecurityGroupRuleAction `json:"action"` + + IPRange scw.IPNet `json:"ip_range"` + + DestPortFrom *uint32 `json:"dest_port_from,omitempty"` + + DestPortTo *uint32 `json:"dest_port_to,omitempty"` + + Position uint32 `json:"position"` + + Editable bool `json:"editable"` +} + // setSecurityGroupRuleResponse: set security group rule response. type setSecurityGroupRuleResponse struct { Rule *SecurityGroupRule `json:"rule"` } +// setServerRequest: set server request. +type setServerRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ID: instance unique ID. + ID string `json:"-"` + + // Name: instance name. + Name string `json:"name"` + + // Organization: instance Organization ID. + Organization string `json:"organization"` + + // Project: instance Project ID. + Project string `json:"project"` + + // AllowedActions: provide a list of allowed actions on the server. + AllowedActions []ServerAction `json:"allowed_actions"` + + // Tags: tags associated with the Instance. + Tags *[]string `json:"tags,omitempty"` + + // CommercialType: instance commercial type (eg. GP1-M). + CommercialType string `json:"commercial_type"` + + // CreationDate: instance creation date. + CreationDate *time.Time `json:"creation_date,omitempty"` + + // DynamicIPRequired: true if a dynamic IPv4 is required. + DynamicIPRequired bool `json:"dynamic_ip_required"` + + // RoutedIPEnabled: true to configure the instance so it uses the new routed IP mode (once this is set to True you cannot set it back to False). + RoutedIPEnabled *bool `json:"routed_ip_enabled,omitempty"` + + // EnableIPv6: true if IPv6 is enabled. + EnableIPv6 bool `json:"enable_ipv6"` + + // Hostname: instance host name. + Hostname string `json:"hostname"` + + // Image: provide information on the Instance image. + Image *Image `json:"image,omitempty"` + + // Protected: instance protection option is activated. + Protected bool `json:"protected"` + + // PrivateIP: instance private IP address. + PrivateIP *string `json:"private_ip,omitempty"` + + // PublicIP: information about the public IP. + PublicIP *ServerIP `json:"public_ip,omitempty"` + + // PublicIPs: information about all the public IPs attached to the server. + PublicIPs []*ServerIP `json:"public_ips"` + + // ModificationDate: instance modification date. + ModificationDate *time.Time `json:"modification_date,omitempty"` + + // State: instance state. + // Default value: running + State ServerState `json:"state"` + + // Location: instance location. + Location *ServerLocation `json:"location,omitempty"` + + // IPv6: instance IPv6 address. + IPv6 *ServerIPv6 `json:"ipv6,omitempty"` + + // Deprecated: Bootscript: instance bootscript. + Bootscript *Bootscript `json:"bootscript,omitempty"` + + // BootType: instance boot type. + // Default value: local + BootType BootType `json:"boot_type"` + + // Volumes: instance volumes. + Volumes map[string]*Volume `json:"volumes"` + + // SecurityGroup: instance security group. + SecurityGroup *SecurityGroupSummary `json:"security_group,omitempty"` + + // Maintenances: instance planned maintenances. 
+ Maintenances []*ServerMaintenance `json:"maintenances"` + + // StateDetail: instance state_detail. + StateDetail string `json:"state_detail"` + + // Arch: instance architecture (refers to the CPU architecture used for the Instance, e.g. x86_64, arm64). + // Default value: unknown_arch + Arch Arch `json:"arch"` + + // PlacementGroup: instance placement group. + PlacementGroup *PlacementGroup `json:"placement_group,omitempty"` + + // PrivateNics: instance private NICs. + PrivateNics []*PrivateNIC `json:"private_nics"` +} + // setServerResponse: set server response. type setServerResponse struct { Server *Server `json:"server"` } +// setSnapshotRequest: set snapshot request. +type setSnapshotRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + SnapshotID string `json:"-"` + + ID string `json:"id"` + + Name string `json:"name"` + + Organization string `json:"organization"` + + // VolumeType: default value: l_ssd + VolumeType VolumeVolumeType `json:"volume_type"` + + Size scw.Size `json:"size"` + + // State: default value: available + State SnapshotState `json:"state"` + + BaseVolume *SnapshotBaseVolume `json:"base_volume,omitempty"` + + CreationDate *time.Time `json:"creation_date,omitempty"` + + ModificationDate *time.Time `json:"modification_date,omitempty"` + + Project string `json:"project"` + + Tags *[]string `json:"tags,omitempty"` +} + // setSnapshotResponse: set snapshot response. type setSnapshotResponse struct { Snapshot *Snapshot `json:"snapshot"` } -// Service API +// Instance API. +type API struct { + client *scw.Client +} -// Zones list localities the api is available in +// NewAPI returns a API object from a Scaleway client. +func NewAPI(client *scw.Client) *API { + return &API{ + client: client, + } +} func (s *API) Zones() []scw.Zone { return []scw.Zone{scw.ZoneFrPar1, scw.ZoneFrPar2, scw.ZoneFrPar3, scw.ZoneNlAms1, scw.ZoneNlAms2, scw.ZoneNlAms3, scw.ZonePlWaw1, scw.ZonePlWaw2, scw.ZonePlWaw3} } -type GetServerTypesAvailabilityRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PerPage: a positive integer lower or equal to 100 to select the number of items to return. - // Default value: 50 - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to return. - Page *int32 `json:"-"` -} - -// GetServerTypesAvailability: get availability. -// Get availability for all Instance types. +// GetServerTypesAvailability: Get availability for all Instance types. 
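// Illustrative sketch, not part of the vendored diff: constructing the client
// and the Instance API shown above (NewAPI, Zones), then listing Instance types
// with ListServersTypes. The credentials are placeholders; request fields are
// left at their defaults.
package main

import (
	"fmt"

	"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"), // placeholder credentials
		scw.WithDefaultZone(scw.ZoneFrPar1),
	)
	if err != nil {
		panic(err)
	}

	api := instance.NewAPI(client)
	fmt.Println("available zones:", api.Zones())

	types, err := api.ListServersTypes(&instance.ListServersTypesRequest{})
	if err != nil {
		panic(err)
	}
	for name, st := range types.Servers {
		fmt.Printf("%s: %d vCPUs, %d bytes RAM\n", name, st.Ncpus, st.RAM)
	}
}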
func (s *API) GetServerTypesAvailability(req *GetServerTypesAvailabilityRequest, opts ...scw.RequestOption) (*GetServerTypesAvailabilityResponse, error) { var err error @@ -1862,11 +3919,6 @@ func (s *API) GetServerTypesAvailability(req *GetServerTypesAvailabilityRequest, req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "per_page", req.PerPage) parameter.AddToQuery(query, "page", req.Page) @@ -1876,10 +3928,9 @@ func (s *API) GetServerTypesAvailability(req *GetServerTypesAvailabilityRequest, } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/products/servers/availability", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/products/servers/availability", + Query: query, } var resp GetServerTypesAvailabilityResponse @@ -1891,17 +3942,7 @@ func (s *API) GetServerTypesAvailability(req *GetServerTypesAvailabilityRequest, return &resp, nil } -type ListServersTypesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - PerPage *uint32 `json:"-"` - - Page *int32 `json:"-"` -} - -// ListServersTypes: list Instance types. -// List available Instance types and their technical details. +// ListServersTypes: List available Instance types and their technical details. func (s *API) ListServersTypes(req *ListServersTypesRequest, opts ...scw.RequestOption) (*ListServersTypesResponse, error) { var err error @@ -1910,11 +3951,6 @@ func (s *API) ListServersTypes(req *ListServersTypesRequest, opts ...scw.Request req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "per_page", req.PerPage) parameter.AddToQuery(query, "page", req.Page) @@ -1924,10 +3960,9 @@ func (s *API) ListServersTypes(req *ListServersTypesRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/products/servers", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/products/servers", + Query: query, } var resp ListServersTypesResponse @@ -1939,17 +3974,7 @@ func (s *API) ListServersTypes(req *ListServersTypesRequest, opts ...scw.Request return &resp, nil } -type ListVolumesTypesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - PerPage *uint32 `json:"-"` - - Page *int32 `json:"-"` -} - -// ListVolumesTypes: list volume types. -// List all volume types and their technical details. +// ListVolumesTypes: List all volume types and their technical details. 
func (s *API) ListVolumesTypes(req *ListVolumesTypesRequest, opts ...scw.RequestOption) (*ListVolumesTypesResponse, error) { var err error @@ -1958,11 +3983,6 @@ func (s *API) ListVolumesTypes(req *ListVolumesTypesRequest, opts ...scw.Request req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "per_page", req.PerPage) parameter.AddToQuery(query, "page", req.Page) @@ -1972,10 +3992,9 @@ func (s *API) ListVolumesTypes(req *ListVolumesTypesRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/products/volumes", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/products/volumes", + Query: query, } var resp ListVolumesTypesResponse @@ -1987,44 +4006,7 @@ func (s *API) ListVolumesTypes(req *ListVolumesTypesRequest, opts ...scw.Request return &resp, nil } -type ListServersRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PerPage: a positive integer lower or equal to 100 to select the number of items to return. - // Default value: 50 - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to return. - Page *int32 `json:"-"` - // Organization: list only Instances of this Organization ID. - Organization *string `json:"-"` - // Project: list only Instances of this Project ID. - Project *string `json:"-"` - // Name: filter Instances by name (eg. "server1" will return "server100" and "server1" but not "foo"). - Name *string `json:"-"` - // PrivateIP: list Instances by private_ip. - PrivateIP *net.IP `json:"-"` - // WithoutIP: list Instances that are not attached to a public IP. - WithoutIP *bool `json:"-"` - // CommercialType: list Instances of this commercial type. - CommercialType *string `json:"-"` - // State: list Instances in this state. - // Default value: running - State *ServerState `json:"-"` - // Tags: list Instances with these exact tags (to filter with several tags, use commas to separate them). - Tags []string `json:"-"` - // PrivateNetwork: list Instances in this Private Network. - PrivateNetwork *string `json:"-"` - // Order: define the order of the returned servers. - // Default value: creation_date_desc - Order ListServersRequestOrder `json:"-"` - // PrivateNetworks: list Instances from the given Private Networks (use commas to separate them). - PrivateNetworks []string `json:"-"` - // PrivateNicMacAddress: list Instances associated with the given private NIC MAC address. - PrivateNicMacAddress *string `json:"-"` -} - -// ListServers: list all Instances. -// List all Instances in a specified Availability Zone, e.g. `fr-par-1`. +// ListServers: List all Instances in a specified Availability Zone, e.g. `fr-par-1`. 
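// Illustrative sketch, not part of the vendored diff: listing Instances with the
// ListServersRequest filters removed-and-regenerated above. Note that these
// generated methods no longer fill PerPage from the client's default page size
// (the GetDefaultPageSize block removed throughout these hunks), so either set
// PerPage explicitly or pass scw.WithAllPages(). Assumes an *instance.API as
// constructed in the earlier sketch; imports assumed: "fmt",
// "github.com/scaleway/scaleway-sdk-go/scw".
func listProdServers(api *instance.API) error {
	running := instance.ServerStateRunning
	resp, err := api.ListServers(&instance.ListServersRequest{
		Zone:  scw.ZoneFrPar1,
		Name:  scw.StringPtr("web"), // matches "web1", "web-2", ...
		State: &running,
		Tags:  []string{"prod"},
	}, scw.WithAllPages())
	if err != nil {
		return err
	}
	for _, srv := range resp.Servers {
		fmt.Println(srv.ID, srv.Name, srv.State)
	}
	return nil
}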
func (s *API) ListServers(req *ListServersRequest, opts ...scw.RequestOption) (*ListServersResponse, error) { var err error @@ -2033,11 +4015,6 @@ func (s *API) ListServers(req *ListServersRequest, opts ...scw.RequestOption) (* req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "per_page", req.PerPage) parameter.AddToQuery(query, "page", req.Page) @@ -2057,16 +4034,18 @@ func (s *API) ListServers(req *ListServersRequest, opts ...scw.RequestOption) (* parameter.AddToQuery(query, "private_networks", strings.Join(req.PrivateNetworks, ",")) } parameter.AddToQuery(query, "private_nic_mac_address", req.PrivateNicMacAddress) + if len(req.Servers) != 0 { + parameter.AddToQuery(query, "servers", strings.Join(req.Servers, ",")) + } if fmt.Sprint(req.Zone) == "" { return nil, errors.New("field Zone cannot be empty in request") } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers", + Query: query, } var resp ListServersResponse @@ -2078,67 +4057,26 @@ func (s *API) ListServers(req *ListServersRequest, opts ...scw.RequestOption) (* return &resp, nil } -type CreateServerRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Name: instance name. - Name string `json:"name,omitempty"` - // DynamicIPRequired: define if a dynamic IPv4 is required for the Instance. - DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` - // RoutedIPEnabled: if true, configure the Instance so it uses the new routed IP mode. - RoutedIPEnabled *bool `json:"routed_ip_enabled,omitempty"` - // CommercialType: define the Instance commercial type (i.e. GP1-S). - CommercialType string `json:"commercial_type,omitempty"` - // Image: instance image ID or label. - Image string `json:"image,omitempty"` - // Volumes: volumes attached to the server. - Volumes map[string]*VolumeServerTemplate `json:"volumes,omitempty"` - // EnableIPv6: true if IPv6 is enabled on the server. - EnableIPv6 bool `json:"enable_ipv6,omitempty"` - // PublicIP: ID of the reserved IP to attach to the Instance. - PublicIP *string `json:"public_ip,omitempty"` - // PublicIPs: a list of reserved IP IDs to attach to the Instance. - PublicIPs *[]string `json:"public_ips,omitempty"` - // BootType: boot type to use. - // Default value: local - BootType *BootType `json:"boot_type,omitempty"` - // Deprecated: Bootscript: bootscript ID to use when `boot_type` is set to `bootscript`. - Bootscript *string `json:"bootscript,omitempty"` - // Deprecated: Organization: instance Organization ID. - // Precisely one of Organization, Project must be set. - Organization *string `json:"organization,omitempty"` - // Project: instance Project ID. - // Precisely one of Organization, Project must be set. - Project *string `json:"project,omitempty"` - // Tags: instance tags. - Tags []string `json:"tags,omitempty"` - // SecurityGroup: security group ID. - SecurityGroup *string `json:"security_group,omitempty"` - // PlacementGroup: placement group ID if Instance must be part of a placement group. - PlacementGroup *string `json:"placement_group,omitempty"` -} - -// createServer: create an Instance. 
-// Create a new Instance of the specified commercial type in the specified zone. Pay attention to the volumes parameter, which takes an object which can be used in different ways to achieve different behaviors. +// createServer: Create a new Instance of the specified commercial type in the specified zone. Pay attention to the volumes parameter, which takes an object which can be used in different ways to achieve different behaviors. // Get more information in the [Technical Information](#technical-information) section of the introduction. func (s *API) createServer(req *CreateServerRequest, opts ...scw.RequestOption) (*CreateServerResponse, error) { var err error + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + defaultProject, exist := s.client.GetDefaultProjectID() - if exist && req.Organization == nil && req.Project == nil { + if exist && req.Project == nil && req.Organization == nil { req.Project = &defaultProject } defaultOrganization, exist := s.client.GetDefaultOrganizationID() - if exist && req.Organization == nil && req.Project == nil { + if exist && req.Project == nil && req.Organization == nil { req.Organization = &defaultOrganization } - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone - } - if req.Name == "" { req.Name = namegenerator.GetRandomName("srv") } @@ -2148,9 +4086,8 @@ func (s *API) createServer(req *CreateServerRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers", } err = scwReq.SetBody(req) @@ -2167,15 +4104,7 @@ func (s *API) createServer(req *CreateServerRequest, opts ...scw.RequestOption) return &resp, nil } -type DeleteServerRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - ServerID string `json:"-"` -} - -// DeleteServer: delete an Instance. -// Delete the Instance with the specified ID. +// DeleteServer: Delete the Instance with the specified ID. func (s *API) DeleteServer(req *DeleteServerRequest, opts ...scw.RequestOption) error { var err error @@ -2193,9 +4122,8 @@ func (s *API) DeleteServer(req *DeleteServerRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -2205,15 +4133,7 @@ func (s *API) DeleteServer(req *DeleteServerRequest, opts ...scw.RequestOption) return nil } -type GetServerRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: UUID of the Instance you want to get. - ServerID string `json:"-"` -} - -// GetServer: get an Instance. -// Get the details of a specified Instance. +// GetServer: Get the details of a specified Instance. 
func (s *API) GetServer(req *GetServerRequest, opts ...scw.RequestOption) (*GetServerResponse, error) { var err error @@ -2231,9 +4151,8 @@ func (s *API) GetServer(req *GetServerRequest, opts ...scw.RequestOption) (*GetS } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "", } var resp GetServerResponse @@ -2245,80 +4164,13 @@ func (s *API) GetServer(req *GetServerRequest, opts ...scw.RequestOption) (*GetS return &resp, nil } -type setServerRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ID: instance unique ID. - ID string `json:"-"` - // Name: instance name. - Name string `json:"name"` - // Organization: instance Organization ID. - Organization string `json:"organization"` - // Project: instance Project ID. - Project string `json:"project"` - // AllowedActions: provide a list of allowed actions on the server. - AllowedActions []ServerAction `json:"allowed_actions"` - // Tags: tags associated with the Instance. - Tags *[]string `json:"tags"` - // CommercialType: instance commercial type (eg. GP1-M). - CommercialType string `json:"commercial_type"` - // CreationDate: instance creation date. - CreationDate *time.Time `json:"creation_date"` - // DynamicIPRequired: true if a dynamic IPv4 is required. - DynamicIPRequired bool `json:"dynamic_ip_required"` - // RoutedIPEnabled: true to configure the instance so it uses the new routed IP mode (once this is set to True you cannot set it back to False). - RoutedIPEnabled *bool `json:"routed_ip_enabled"` - // EnableIPv6: true if IPv6 is enabled. - EnableIPv6 bool `json:"enable_ipv6"` - // Hostname: instance host name. - Hostname string `json:"hostname"` - // Image: provide information on the Instance image. - Image *Image `json:"image"` - // Protected: instance protection option is activated. - Protected bool `json:"protected"` - // PrivateIP: instance private IP address. - PrivateIP *string `json:"private_ip"` - // PublicIP: information about the public IP. - PublicIP *ServerIP `json:"public_ip"` - // PublicIPs: information about all the public IPs attached to the server. - PublicIPs []*ServerIP `json:"public_ips"` - // ModificationDate: instance modification date. - ModificationDate *time.Time `json:"modification_date"` - // State: instance state. - // Default value: running - State ServerState `json:"state"` - // Location: instance location. - Location *ServerLocation `json:"location"` - // IPv6: instance IPv6 address. - IPv6 *ServerIPv6 `json:"ipv6"` - // Deprecated: Bootscript: instance bootscript. - Bootscript *Bootscript `json:"bootscript"` - // BootType: instance boot type. - // Default value: local - BootType BootType `json:"boot_type"` - // Volumes: instance volumes. - Volumes map[string]*Volume `json:"volumes"` - // SecurityGroup: instance security group. - SecurityGroup *SecurityGroupSummary `json:"security_group"` - // Maintenances: instance planned maintenances. - Maintenances []*ServerMaintenance `json:"maintenances"` - // StateDetail: instance state_detail. - StateDetail string `json:"state_detail"` - // Arch: instance architecture (refers to the CPU architecture used for the Instance, e.g. x86_64, arm64). - // Default value: x86_64 - Arch Arch `json:"arch"` - // PlacementGroup: instance placement group. 
- PlacementGroup *PlacementGroup `json:"placement_group"` - // PrivateNics: instance private NICs. - PrivateNics []*PrivateNIC `json:"private_nics"` -} - +// setServer: func (s *API) setServer(req *setServerRequest, opts ...scw.RequestOption) (*setServerResponse, error) { var err error - if req.Project == "" { - defaultProject, _ := s.client.GetDefaultProjectID() - req.Project = defaultProject + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone } if req.Organization == "" { @@ -2326,9 +4178,9 @@ func (s *API) setServer(req *setServerRequest, opts ...scw.RequestOption) (*setS req.Organization = defaultOrganization } - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone + if req.Project == "" { + defaultProject, _ := s.client.GetDefaultProjectID() + req.Project = defaultProject } if fmt.Sprint(req.Zone) == "" { @@ -2340,9 +4192,8 @@ func (s *API) setServer(req *setServerRequest, opts ...scw.RequestOption) (*setS } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ID) + "", } err = scwReq.SetBody(req) @@ -2359,47 +4210,7 @@ func (s *API) setServer(req *setServerRequest, opts ...scw.RequestOption) (*setS return &resp, nil } -type UpdateServerRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: UUID of the Instance. - ServerID string `json:"-"` - // Name: name of the Instance. - Name *string `json:"name,omitempty"` - // BootType: default value: local - BootType *BootType `json:"boot_type,omitempty"` - // Tags: tags of the Instance. - Tags *[]string `json:"tags,omitempty"` - - Volumes *map[string]*VolumeServerTemplate `json:"volumes,omitempty"` - // Deprecated - Bootscript *string `json:"bootscript,omitempty"` - - DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` - // RoutedIPEnabled: true to configure the instance so it uses the new routed IP mode (once this is set to True you cannot set it back to False). - RoutedIPEnabled *bool `json:"routed_ip_enabled,omitempty"` - - PublicIPs []*ServerIP `json:"public_ips,omitempty"` - - EnableIPv6 *bool `json:"enable_ipv6,omitempty"` - - Protected *bool `json:"protected,omitempty"` - - SecurityGroup *SecurityGroupTemplate `json:"security_group,omitempty"` - // PlacementGroup: placement group ID if Instance must be part of a placement group. - PlacementGroup *NullableStringValue `json:"placement_group,omitempty"` - // PrivateNics: instance private NICs. - PrivateNics []*PrivateNIC `json:"private_nics,omitempty"` - // CommercialType: set the commercial_type for this Instance. - // Warning: This field has some restrictions: - // - Cannot be changed if the Instance is not in `stopped` state. - // - Cannot be changed if the Instance is in a placement group. - // - Local storage requirements of the target commercial_types must be fulfilled (i.e. if an Instance has 80GB of local storage, it can be changed into a GP1-XS, which has a maximum of 150GB, but it cannot be changed into a DEV1-S, which has only 20GB). - CommercialType *string `json:"commercial_type,omitempty"` -} - -// updateServer: update an Instance. -// Update the Instance information, such as name, boot mode, or tags. +// updateServer: Update the Instance information, such as name, boot mode, or tags. 
func (s *API) updateServer(req *UpdateServerRequest, opts ...scw.RequestOption) (*UpdateServerResponse, error) { var err error @@ -2417,9 +4228,8 @@ func (s *API) updateServer(req *UpdateServerRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "", } err = scwReq.SetBody(req) @@ -2436,15 +4246,7 @@ func (s *API) updateServer(req *UpdateServerRequest, opts ...scw.RequestOption) return &resp, nil } -type ListServerActionsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - ServerID string `json:"-"` -} - -// ListServerActions: list Instance actions. -// List all actions (e.g. power on, power off, reboot) that can currently be performed on an Instance. +// ListServerActions: List all actions (e.g. power on, power off, reboot) that can currently be performed on an Instance. func (s *API) ListServerActions(req *ListServerActionsRequest, opts ...scw.RequestOption) (*ListServerActionsResponse, error) { var err error @@ -2462,9 +4264,8 @@ func (s *API) ListServerActions(req *ListServerActionsRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/action", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/action", } var resp ListServerActionsResponse @@ -2476,26 +4277,7 @@ func (s *API) ListServerActions(req *ListServerActionsRequest, opts ...scw.Reque return &resp, nil } -type ServerActionRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: UUID of the Instance. - ServerID string `json:"-"` - // Action: action to perform on the Instance. - // Default value: poweron - Action ServerAction `json:"action"` - // Name: name of the backup you want to create. - // Name of the backup you want to create. - // This field should only be specified when performing a backup action. - Name *string `json:"name,omitempty"` - // Volumes: for each volume UUID, the snapshot parameters of the volume. - // For each volume UUID, the snapshot parameters of the volume. - // This field should only be specified when performing a backup action. - Volumes map[string]*ServerActionRequestVolumeBackupTemplate `json:"volumes,omitempty"` -} - -// ServerAction: perform action. -// Perform an action on an Instance. +// ServerAction: Perform an action on an Instance. // Available actions are: // * `poweron`: Start a stopped Instance. // * `poweroff`: Fully stop the Instance and release the hypervisor slot. 
@@ -2525,9 +4307,8 @@ func (s *API) ServerAction(req *ServerActionRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/action", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/action", } err = scwReq.SetBody(req) @@ -2544,15 +4325,7 @@ func (s *API) ServerAction(req *ServerActionRequest, opts ...scw.RequestOption) return &resp, nil } -type ListServerUserDataRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: UUID of the Instance. - ServerID string `json:"-"` -} - -// ListServerUserData: list user data. -// List all user data keys registered on a specified Instance. +// ListServerUserData: List all user data keys registered on a specified Instance. func (s *API) ListServerUserData(req *ListServerUserDataRequest, opts ...scw.RequestOption) (*ListServerUserDataResponse, error) { var err error @@ -2570,9 +4343,8 @@ func (s *API) ListServerUserData(req *ListServerUserDataRequest, opts ...scw.Req } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/user_data", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/user_data", } var resp ListServerUserDataResponse @@ -2584,17 +4356,7 @@ func (s *API) ListServerUserData(req *ListServerUserDataRequest, opts ...scw.Req return &resp, nil } -type DeleteServerUserDataRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: UUID of the Instance. - ServerID string `json:"-"` - // Key: key of the user data to delete. - Key string `json:"-"` -} - -// DeleteServerUserData: delete user data. -// Delete the specified key from an Instance's user data. +// DeleteServerUserData: Delete the specified key from an Instance's user data. func (s *API) DeleteServerUserData(req *DeleteServerUserDataRequest, opts ...scw.RequestOption) error { var err error @@ -2616,9 +4378,8 @@ func (s *API) DeleteServerUserData(req *DeleteServerUserDataRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/user_data/" + fmt.Sprint(req.Key) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/user_data/" + fmt.Sprint(req.Key) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -2628,30 +4389,8 @@ func (s *API) DeleteServerUserData(req *DeleteServerUserDataRequest, opts ...scw return nil } -type ListImagesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - Organization *string `json:"-"` - - PerPage *uint32 `json:"-"` - - Page *int32 `json:"-"` - - Name *string `json:"-"` - - Public *bool `json:"-"` - - Arch *string `json:"-"` - - Project *string `json:"-"` - - Tags *string `json:"-"` -} - -// ListImages: list Instance images. -// List all existing Instance images. 
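// Illustrative sketch, not part of the vendored diff: performing an action on an
// Instance with ServerAction, assuming the request type keeps the Zone/ServerID/
// Action/Name fields shown in the removed hunk above. The server ID is a
// placeholder and the Name field only applies to the backup action.
func backupServer(api *instance.API, serverID string) error {
	res, err := api.ServerAction(&instance.ServerActionRequest{
		ServerID: serverID,
		Action:   instance.ServerActionBackup,
		Name:     scw.StringPtr("nightly-backup"),
	})
	if err != nil {
		return err
	}
	fmt.Println("task", res.Task.ID, "status:", res.Task.Status)
	return nil
}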
-func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*ListImagesResponse, error) { +// AttachServerVolume: +func (s *API) AttachServerVolume(req *AttachServerVolumeRequest, opts ...scw.RequestOption) (*AttachServerVolumeResponse, error) { var err error if req.Zone == "" { @@ -2659,9 +4398,76 @@ func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*Li req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.ServerID) == "" { + return nil, errors.New("field ServerID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/attach-volume", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp AttachServerVolumeResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// DetachServerVolume: +func (s *API) DetachServerVolume(req *DetachServerVolumeRequest, opts ...scw.RequestOption) (*DetachServerVolumeResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.ServerID) == "" { + return nil, errors.New("field ServerID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/detach-volume", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp DetachServerVolumeResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListImages: List all existing Instance images. +func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*ListImagesResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone } query := url.Values{} @@ -2679,10 +4485,9 @@ func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*Li } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images", + Query: query, } var resp ListImagesResponse @@ -2694,15 +4499,7 @@ func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*Li return &resp, nil } -type GetImageRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ImageID: UUID of the image you want to get. - ImageID string `json:"-"` -} - -// GetImage: get an Instance image. -// Get details of an image with the specified ID. +// GetImage: Get details of an image with the specified ID. 
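`AttachServerVolume` and `DetachServerVolume` are new endpoints in this SDK update (POST on `/attach-volume` and `/detach-volume`). A rough usage sketch: `ServerID` and the default-zone fallback are taken from the generated methods above, while `VolumeID` is an assumed field name, since both request struct definitions live outside this hunk.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// attachThenDetach exercises the two endpoints added in this update.
func attachThenDetach(api *instance.API, serverID, volumeID string) error {
	if _, err := api.AttachServerVolume(&instance.AttachServerVolumeRequest{
		ServerID: serverID,
		VolumeID: volumeID, // assumed field name
	}); err != nil {
		return err
	}
	_, err := api.DetachServerVolume(&instance.DetachServerVolumeRequest{
		ServerID: serverID,
		VolumeID: volumeID, // assumed field name
	})
	return err
}
```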
func (s *API) GetImage(req *GetImageRequest, opts ...scw.RequestOption) (*GetImageResponse, error) { var err error @@ -2720,9 +4517,8 @@ func (s *API) GetImage(req *GetImageRequest, opts ...scw.RequestOption) (*GetIma } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ImageID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ImageID) + "", } var resp GetImageResponse @@ -2734,52 +4530,25 @@ func (s *API) GetImage(req *GetImageRequest, opts ...scw.RequestOption) (*GetIma return &resp, nil } -type CreateImageRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Name: name of the image. - Name string `json:"name,omitempty"` - // RootVolume: UUID of the snapshot. - RootVolume string `json:"root_volume,omitempty"` - // Arch: architecture of the image. - // Default value: x86_64 - Arch Arch `json:"arch"` - // Deprecated: DefaultBootscript: default bootscript of the image. - DefaultBootscript *string `json:"default_bootscript,omitempty"` - // ExtraVolumes: additional volumes of the image. - ExtraVolumes map[string]*VolumeTemplate `json:"extra_volumes,omitempty"` - // Deprecated: Organization: organization ID of the image. - // Precisely one of Organization, Project must be set. - Organization *string `json:"organization,omitempty"` - // Project: project ID of the image. - // Precisely one of Organization, Project must be set. - Project *string `json:"project,omitempty"` - // Tags: tags of the image. - Tags []string `json:"tags,omitempty"` - // Public: true to create a public image. - Public *bool `json:"public,omitempty"` -} - -// CreateImage: create an Instance image. -// Create an Instance image from the specified snapshot ID. +// CreateImage: Create an Instance image from the specified snapshot ID. func (s *API) CreateImage(req *CreateImageRequest, opts ...scw.RequestOption) (*CreateImageResponse, error) { var err error - defaultProject, exist := s.client.GetDefaultProjectID() - if exist && req.Organization == nil && req.Project == nil { - req.Project = &defaultProject - } - - defaultOrganization, exist := s.client.GetDefaultOrganizationID() - if exist && req.Organization == nil && req.Project == nil { - req.Organization = &defaultOrganization - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProject, exist := s.client.GetDefaultProjectID() + if exist && req.Project == nil && req.Organization == nil { + req.Project = &defaultProject + } + + defaultOrganization, exist := s.client.GetDefaultOrganizationID() + if exist && req.Project == nil && req.Organization == nil { + req.Organization = &defaultOrganization + } + if req.Name == "" { req.Name = namegenerator.GetRandomName("img") } @@ -2789,9 +4558,8 @@ func (s *API) CreateImage(req *CreateImageRequest, opts ...scw.RequestOption) (* } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images", } err = scwReq.SetBody(req) @@ -2808,47 +4576,13 @@ func (s *API) CreateImage(req *CreateImageRequest, opts ...scw.RequestOption) (* return &resp, nil } -type SetImageRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. 
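The `CreateImage` hunk mainly reorders the default project/organization resolution after the zone fallback; behaviour for callers is unchanged. A small sketch, using only fields from the `CreateImageRequest` definition that was moved out of this file; the `ArchX86_64` constant name is assumed for the x86_64 enum value.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// createImageFromSnapshot builds an image from an existing snapshot. Name is
// left empty on purpose: as the hunk shows, the SDK fills in a random "img-*"
// name and resolves the default project/organization before sending the request.
func createImageFromSnapshot(api *instance.API, snapshotID string) (*instance.CreateImageResponse, error) {
	return api.CreateImage(&instance.CreateImageRequest{
		RootVolume: snapshotID,           // UUID of the snapshot to use as the root volume
		Arch:       instance.ArchX86_64,  // constant name assumed
	})
}
```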
- Zone scw.Zone `json:"-"` - - ID string `json:"-"` - - Name string `json:"name"` - // Arch: default value: x86_64 - Arch Arch `json:"arch"` - - CreationDate *time.Time `json:"creation_date"` - - ModificationDate *time.Time `json:"modification_date"` - // Deprecated - DefaultBootscript *Bootscript `json:"default_bootscript"` - - ExtraVolumes map[string]*Volume `json:"extra_volumes"` - - FromServer string `json:"from_server"` - - Organization string `json:"organization"` - - Public bool `json:"public"` - - RootVolume *VolumeSummary `json:"root_volume"` - // State: default value: available - State ImageState `json:"state"` - - Project string `json:"project"` - - Tags *[]string `json:"tags"` -} - -// setImage: update image. -// Replace all image properties with an image message. +// setImage: Replace all image properties with an image message. func (s *API) setImage(req *SetImageRequest, opts ...scw.RequestOption) (*setImageResponse, error) { var err error - if req.Project == "" { - defaultProject, _ := s.client.GetDefaultProjectID() - req.Project = defaultProject + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone } if req.Organization == "" { @@ -2856,9 +4590,9 @@ func (s *API) setImage(req *SetImageRequest, opts ...scw.RequestOption) (*setIma req.Organization = defaultOrganization } - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone + if req.Project == "" { + defaultProject, _ := s.client.GetDefaultProjectID() + req.Project = defaultProject } if fmt.Sprint(req.Zone) == "" { @@ -2870,9 +4604,8 @@ func (s *API) setImage(req *SetImageRequest, opts ...scw.RequestOption) (*setIma } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ID) + "", } err = scwReq.SetBody(req) @@ -2889,15 +4622,43 @@ func (s *API) setImage(req *SetImageRequest, opts ...scw.RequestOption) (*setIma return &resp, nil } -type DeleteImageRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ImageID: UUID of the image you want to delete. - ImageID string `json:"-"` +// UpdateImage: Update the properties of an image. +func (s *API) UpdateImage(req *UpdateImageRequest, opts ...scw.RequestOption) (*UpdateImageResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.ImageID) == "" { + return nil, errors.New("field ImageID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ImageID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp UpdateImageResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil } -// DeleteImage: delete an Instance image. -// Delete the image with the specified ID. +// DeleteImage: Delete the image with the specified ID. 
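`UpdateImage` is a newly generated PATCH endpoint ("Update the properties of an image"). Only `Zone` and `ImageID` are visible in this hunk, so the sketch below guesses the mutable field: `Name` and its `*string` type are assumed by analogy with the other `Update*` request structs in this file.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// renameImage uses the new UpdateImage PATCH endpoint to change an image name.
func renameImage(api *instance.API, imageID, newName string) error {
	_, err := api.UpdateImage(&instance.UpdateImageRequest{
		ImageID: imageID,
		Name:    &newName, // assumed field
	})
	return err
}
```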
func (s *API) DeleteImage(req *DeleteImageRequest, opts ...scw.RequestOption) error { var err error @@ -2915,9 +4676,8 @@ func (s *API) DeleteImage(req *DeleteImageRequest, opts ...scw.RequestOption) er } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ImageID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/images/" + fmt.Sprint(req.ImageID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -2927,25 +4687,7 @@ func (s *API) DeleteImage(req *DeleteImageRequest, opts ...scw.RequestOption) er return nil } -type ListSnapshotsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - Organization *string `json:"-"` - - PerPage *uint32 `json:"-"` - - Page *int32 `json:"-"` - - Name *string `json:"-"` - - Project *string `json:"-"` - - Tags *string `json:"-"` -} - -// ListSnapshots: list snapshots. -// List all snapshots of an Organization in a specified Availability Zone. +// ListSnapshots: List all snapshots of an Organization in a specified Availability Zone. func (s *API) ListSnapshots(req *ListSnapshotsRequest, opts ...scw.RequestOption) (*ListSnapshotsResponse, error) { var err error @@ -2954,28 +4696,23 @@ func (s *API) ListSnapshots(req *ListSnapshotsRequest, opts ...scw.RequestOption req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "organization", req.Organization) + parameter.AddToQuery(query, "project", req.Project) parameter.AddToQuery(query, "per_page", req.PerPage) parameter.AddToQuery(query, "page", req.Page) parameter.AddToQuery(query, "name", req.Name) - parameter.AddToQuery(query, "project", req.Project) parameter.AddToQuery(query, "tags", req.Tags) + parameter.AddToQuery(query, "base_volume_id", req.BaseVolumeID) if fmt.Sprint(req.Zone) == "" { return nil, errors.New("field Zone cannot be empty in request") } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots", + Query: query, } var resp ListSnapshotsResponse @@ -2987,54 +4724,25 @@ func (s *API) ListSnapshots(req *ListSnapshotsRequest, opts ...scw.RequestOption return &resp, nil } -type CreateSnapshotRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Name: name of the snapshot. - Name string `json:"name,omitempty"` - // VolumeID: UUID of the volume. - VolumeID *string `json:"volume_id,omitempty"` - // Tags: tags of the snapshot. - Tags *[]string `json:"tags,omitempty"` - // Deprecated: Organization: organization ID of the snapshot. - // Precisely one of Organization, Project must be set. - Organization *string `json:"organization,omitempty"` - // Project: project ID of the snapshot. - // Precisely one of Organization, Project must be set. - Project *string `json:"project,omitempty"` - // VolumeType: volume type of the snapshot. - // Overrides the volume_type of the snapshot. - // If omitted, the volume type of the original volume will be used. 
- // Default value: unknown_volume_type - VolumeType SnapshotVolumeType `json:"volume_type"` - // Bucket: bucket name for snapshot imports. - Bucket *string `json:"bucket,omitempty"` - // Key: object key for snapshot imports. - Key *string `json:"key,omitempty"` - // Size: imported snapshot size, must be a multiple of 512. - Size *scw.Size `json:"size,omitempty"` -} - -// CreateSnapshot: create a snapshot from a specified volume or from a QCOW2 file. -// Create a snapshot from a specified volume or from a QCOW2 file in a specified Availability Zone. +// CreateSnapshot: Create a snapshot from a specified volume or from a QCOW2 file in a specified Availability Zone. func (s *API) CreateSnapshot(req *CreateSnapshotRequest, opts ...scw.RequestOption) (*CreateSnapshotResponse, error) { var err error - defaultProject, exist := s.client.GetDefaultProjectID() - if exist && req.Organization == nil && req.Project == nil { - req.Project = &defaultProject - } - - defaultOrganization, exist := s.client.GetDefaultOrganizationID() - if exist && req.Organization == nil && req.Project == nil { - req.Organization = &defaultOrganization - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProject, exist := s.client.GetDefaultProjectID() + if exist && req.Project == nil && req.Organization == nil { + req.Project = &defaultProject + } + + defaultOrganization, exist := s.client.GetDefaultOrganizationID() + if exist && req.Project == nil && req.Organization == nil { + req.Organization = &defaultOrganization + } + if req.Name == "" { req.Name = namegenerator.GetRandomName("snp") } @@ -3044,9 +4752,8 @@ func (s *API) CreateSnapshot(req *CreateSnapshotRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots", } err = scwReq.SetBody(req) @@ -3063,15 +4770,7 @@ func (s *API) CreateSnapshot(req *CreateSnapshotRequest, opts ...scw.RequestOpti return &resp, nil } -type GetSnapshotRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SnapshotID: UUID of the snapshot you want to get. - SnapshotID string `json:"-"` -} - -// GetSnapshot: get a snapshot. -// Get details of a snapshot with the specified ID. +// GetSnapshot: Get details of a snapshot with the specified ID. func (s *API) GetSnapshot(req *GetSnapshotRequest, opts ...scw.RequestOption) (*GetSnapshotResponse, error) { var err error @@ -3089,9 +4788,8 @@ func (s *API) GetSnapshot(req *GetSnapshotRequest, opts ...scw.RequestOption) (* } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", } var resp GetSnapshotResponse @@ -3103,43 +4801,13 @@ func (s *API) GetSnapshot(req *GetSnapshotRequest, opts ...scw.RequestOption) (* return &resp, nil } -type setSnapshotRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. 
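For reference, a minimal `CreateSnapshot` call built from the `CreateSnapshotRequest` definition removed above (the type now lives elsewhere, but the fields are unchanged in this hunk): `VolumeID` is a `*string`, and an empty `Name` lets the SDK pick a random "snp-*" name.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// snapshotVolume creates a snapshot of a single volume in the default zone.
func snapshotVolume(api *instance.API, volumeID string) (*instance.CreateSnapshotResponse, error) {
	return api.CreateSnapshot(&instance.CreateSnapshotRequest{
		VolumeID: &volumeID,
	})
}
```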
- Zone scw.Zone `json:"-"` - - SnapshotID string `json:"-"` - - ID string `json:"id"` - - Name string `json:"name"` - - Organization string `json:"organization"` - // VolumeType: default value: l_ssd - VolumeType VolumeVolumeType `json:"volume_type"` - - Size scw.Size `json:"size"` - // State: default value: available - State SnapshotState `json:"state"` - - BaseVolume *SnapshotBaseVolume `json:"base_volume"` - - CreationDate *time.Time `json:"creation_date"` - - ModificationDate *time.Time `json:"modification_date"` - - Project string `json:"project"` - - Tags *[]string `json:"tags"` -} - -// setSnapshot: update snapshot. -// Replace all snapshot properties with a snapshot message. +// setSnapshot: Replace all the properties of a snapshot. func (s *API) setSnapshot(req *setSnapshotRequest, opts ...scw.RequestOption) (*setSnapshotResponse, error) { var err error - if req.Project == "" { - defaultProject, _ := s.client.GetDefaultProjectID() - req.Project = defaultProject + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone } if req.Organization == "" { @@ -3147,9 +4815,9 @@ func (s *API) setSnapshot(req *setSnapshotRequest, opts ...scw.RequestOption) (* req.Organization = defaultOrganization } - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone + if req.Project == "" { + defaultProject, _ := s.client.GetDefaultProjectID() + req.Project = defaultProject } if fmt.Sprint(req.Zone) == "" { @@ -3161,9 +4829,8 @@ func (s *API) setSnapshot(req *setSnapshotRequest, opts ...scw.RequestOption) (* } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", } err = scwReq.SetBody(req) @@ -3180,15 +4847,43 @@ func (s *API) setSnapshot(req *setSnapshotRequest, opts ...scw.RequestOption) (* return &resp, nil } -type DeleteSnapshotRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SnapshotID: UUID of the snapshot you want to delete. - SnapshotID string `json:"-"` +// UpdateSnapshot: Update the properties of a snapshot. +func (s *API) UpdateSnapshot(req *UpdateSnapshotRequest, opts ...scw.RequestOption) (*UpdateSnapshotResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.SnapshotID) == "" { + return nil, errors.New("field SnapshotID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp UpdateSnapshotResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil } -// DeleteSnapshot: delete a snapshot. -// Delete the snapshot with the specified ID. +// DeleteSnapshot: Delete the snapshot with the specified ID. 
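`UpdateSnapshot` is another new PATCH endpoint added by this update. As with `UpdateImage`, only `Zone` and `SnapshotID` appear in the hunk; the `Tags` field and its `*[]string` type in the sketch below are assumptions mirroring the `setSnapshotRequest` definition removed above.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// tagSnapshot patches the tags of an existing snapshot via UpdateSnapshot.
func tagSnapshot(api *instance.API, snapshotID string, tags []string) error {
	_, err := api.UpdateSnapshot(&instance.UpdateSnapshotRequest{
		SnapshotID: snapshotID,
		Tags:       &tags, // assumed field
	})
	return err
}
```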
func (s *API) DeleteSnapshot(req *DeleteSnapshotRequest, opts ...scw.RequestOption) error { var err error @@ -3206,9 +4901,8 @@ func (s *API) DeleteSnapshot(req *DeleteSnapshotRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -3218,19 +4912,7 @@ func (s *API) DeleteSnapshot(req *DeleteSnapshotRequest, opts ...scw.RequestOpti return nil } -type ExportSnapshotRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SnapshotID: snapshot ID. - SnapshotID string `json:"-"` - // Bucket: s3 bucket name. - Bucket string `json:"bucket,omitempty"` - // Key: s3 object key. - Key string `json:"key,omitempty"` -} - -// ExportSnapshot: export a snapshot. -// Export a snapshot to a specified S3 bucket in the same region. +// ExportSnapshot: Export a snapshot to a specified S3 bucket in the same region. func (s *API) ExportSnapshot(req *ExportSnapshotRequest, opts ...scw.RequestOption) (*ExportSnapshotResponse, error) { var err error @@ -3248,9 +4930,8 @@ func (s *API) ExportSnapshot(req *ExportSnapshotRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "/export", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/snapshots/" + fmt.Sprint(req.SnapshotID) + "/export", } err = scwReq.SetBody(req) @@ -3267,29 +4948,7 @@ func (s *API) ExportSnapshot(req *ExportSnapshotRequest, opts ...scw.RequestOpti return &resp, nil } -type ListVolumesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // VolumeType: filter by volume type. - // Default value: l_ssd - VolumeType *VolumeVolumeType `json:"-"` - // PerPage: a positive integer lower or equal to 100 to select the number of items to return. - // Default value: 50 - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to return. - Page *int32 `json:"-"` - // Organization: filter volume by Organization ID. - Organization *string `json:"-"` - // Project: filter volume by Project ID. - Project *string `json:"-"` - // Tags: filter volumes with these exact tags (to filter with several tags, use commas to separate them). - Tags []string `json:"-"` - // Name: filter volume by name (for eg. "vol" will return "myvolume" but not "data"). - Name *string `json:"-"` -} - -// ListVolumes: list volumes. -// List volumes in the specified Availability Zone. You can filter the output by volume type. +// ListVolumes: List volumes in the specified Availability Zone. You can filter the output by volume type. 
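`ExportSnapshot` keeps its shape here (only the doc comment and `Headers` field change). A short sketch using the `Bucket` and `Key` string fields from the request definition that was moved out of this file; the bucket and key values are placeholders.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// exportSnapshot copies a snapshot to an Object Storage bucket in the same region.
func exportSnapshot(api *instance.API, snapshotID string) error {
	_, err := api.ExportSnapshot(&instance.ExportSnapshotRequest{
		SnapshotID: snapshotID,
		Bucket:     "my-backup-bucket",          // placeholder bucket name
		Key:        "exports/root-volume.qcow2", // placeholder object key
	})
	return err
}
```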
func (s *API) ListVolumes(req *ListVolumesRequest, opts ...scw.RequestOption) (*ListVolumesResponse, error) { var err error @@ -3298,11 +4957,6 @@ func (s *API) ListVolumes(req *ListVolumesRequest, opts ...scw.RequestOption) (* req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "volume_type", req.VolumeType) parameter.AddToQuery(query, "per_page", req.PerPage) @@ -3319,10 +4973,9 @@ func (s *API) ListVolumes(req *ListVolumesRequest, opts ...scw.RequestOption) (* } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes", + Query: query, } var resp ListVolumesResponse @@ -3334,53 +4987,25 @@ func (s *API) ListVolumes(req *ListVolumesRequest, opts ...scw.RequestOption) (* return &resp, nil } -type CreateVolumeRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Name: volume name. - Name string `json:"name,omitempty"` - // Deprecated: Organization: volume Organization ID. - // Precisely one of Organization, Project must be set. - Organization *string `json:"organization,omitempty"` - // Project: volume Project ID. - // Precisely one of Organization, Project must be set. - Project *string `json:"project,omitempty"` - // Tags: volume tags. - Tags []string `json:"tags,omitempty"` - // VolumeType: volume type. - // Default value: l_ssd - VolumeType VolumeVolumeType `json:"volume_type"` - // Size: volume disk size, must be a multiple of 512. - // Precisely one of BaseSnapshot, BaseVolume, Size must be set. - Size *scw.Size `json:"size,omitempty"` - // BaseVolume: ID of the volume on which this volume will be based. - // Precisely one of BaseSnapshot, BaseVolume, Size must be set. - BaseVolume *string `json:"base_volume,omitempty"` - // BaseSnapshot: ID of the snapshot on which this volume will be based. - // Precisely one of BaseSnapshot, BaseVolume, Size must be set. - BaseSnapshot *string `json:"base_snapshot,omitempty"` -} - -// CreateVolume: create a volume. -// Create a volume of a specified type in an Availability Zone. +// CreateVolume: Create a volume of a specified type in an Availability Zone. 
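Note that the updated generator removes the `GetDefaultPageSize` block from the `List*` methods in this file, so `PerPage` is no longer filled in from the client's default page size here. A sketch of passing it explicitly, using the `*uint32` field from the removed `ListVolumesRequest` definition:

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// listVolumesPaged requests an explicit page size instead of relying on the
// client default, which this generated file no longer applies.
func listVolumesPaged(api *instance.API) (*instance.ListVolumesResponse, error) {
	perPage := uint32(50)
	return api.ListVolumes(&instance.ListVolumesRequest{
		PerPage: &perPage,
	})
}
```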
func (s *API) CreateVolume(req *CreateVolumeRequest, opts ...scw.RequestOption) (*CreateVolumeResponse, error) { var err error - defaultProject, exist := s.client.GetDefaultProjectID() - if exist && req.Organization == nil && req.Project == nil { - req.Project = &defaultProject - } - - defaultOrganization, exist := s.client.GetDefaultOrganizationID() - if exist && req.Organization == nil && req.Project == nil { - req.Organization = &defaultOrganization - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProject, exist := s.client.GetDefaultProjectID() + if exist && req.Project == nil && req.Organization == nil { + req.Project = &defaultProject + } + + defaultOrganization, exist := s.client.GetDefaultOrganizationID() + if exist && req.Project == nil && req.Organization == nil { + req.Organization = &defaultOrganization + } + if req.Name == "" { req.Name = namegenerator.GetRandomName("vol") } @@ -3390,9 +5015,8 @@ func (s *API) CreateVolume(req *CreateVolumeRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes", } err = scwReq.SetBody(req) @@ -3409,15 +5033,7 @@ func (s *API) CreateVolume(req *CreateVolumeRequest, opts ...scw.RequestOption) return &resp, nil } -type GetVolumeRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // VolumeID: UUID of the volume you want to get. - VolumeID string `json:"-"` -} - -// GetVolume: get a volume. -// Get details of a volume with the specified ID. +// GetVolume: Get details of a volume with the specified ID. func (s *API) GetVolume(req *GetVolumeRequest, opts ...scw.RequestOption) (*GetVolumeResponse, error) { var err error @@ -3435,9 +5051,8 @@ func (s *API) GetVolume(req *GetVolumeRequest, opts ...scw.RequestOption) (*GetV } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", } var resp GetVolumeResponse @@ -3449,21 +5064,7 @@ func (s *API) GetVolume(req *GetVolumeRequest, opts ...scw.RequestOption) (*GetV return &resp, nil } -type UpdateVolumeRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // VolumeID: UUID of the volume. - VolumeID string `json:"-"` - // Name: volume name. - Name *string `json:"name,omitempty"` - // Tags: tags of the volume. - Tags *[]string `json:"tags,omitempty"` - // Size: volume disk size, must be a multiple of 512. - Size *scw.Size `json:"size,omitempty"` -} - -// UpdateVolume: update a volume. -// Replace the name and/or size properties of a volume specified by its ID, with the specified value(s). Any volume name can be changed, however only `b_ssd` volumes can currently be increased in size. +// UpdateVolume: Replace the name and/or size properties of a volume specified by its ID, with the specified value(s). Any volume name can be changed, however only `b_ssd` volumes can currently be increased in size. 
func (s *API) UpdateVolume(req *UpdateVolumeRequest, opts ...scw.RequestOption) (*UpdateVolumeResponse, error) { var err error @@ -3481,9 +5082,8 @@ func (s *API) UpdateVolume(req *UpdateVolumeRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", } err = scwReq.SetBody(req) @@ -3500,15 +5100,7 @@ func (s *API) UpdateVolume(req *UpdateVolumeRequest, opts ...scw.RequestOption) return &resp, nil } -type DeleteVolumeRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // VolumeID: UUID of the volume you want to delete. - VolumeID string `json:"-"` -} - -// DeleteVolume: delete a volume. -// Delete the volume with the specified ID. +// DeleteVolume: Delete the volume with the specified ID. func (s *API) DeleteVolume(req *DeleteVolumeRequest, opts ...scw.RequestOption) error { var err error @@ -3526,9 +5118,8 @@ func (s *API) DeleteVolume(req *DeleteVolumeRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/volumes/" + fmt.Sprint(req.VolumeID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -3538,28 +5129,7 @@ func (s *API) DeleteVolume(req *DeleteVolumeRequest, opts ...scw.RequestOption) return nil } -type ListSecurityGroupsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Name: name of the security group. - Name *string `json:"-"` - // Organization: security group Organization ID. - Organization *string `json:"-"` - // Project: security group Project ID. - Project *string `json:"-"` - // Tags: list security groups with these exact tags (to filter with several tags, use commas to separate them). - Tags []string `json:"-"` - // ProjectDefault: filter security groups with this value for project_default. - ProjectDefault *bool `json:"-"` - // PerPage: a positive integer lower or equal to 100 to select the number of items to return. - // Default value: 50 - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to return. - Page *int32 `json:"-"` -} - -// ListSecurityGroups: list security groups. -// List all existing security groups. +// ListSecurityGroups: List all existing security groups. 
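`UpdateVolume` itself is unchanged apart from the comment and `Headers` cleanup. A sketch of the resize case described in its doc comment, built from the `UpdateVolumeRequest` fields removed above; `scw.GB` is assumed to be the SDK's SI size constant.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// growVolume increases a volume to 50 GB via UpdateVolume. Per the doc comment,
// only b_ssd volumes can currently be increased in size, and Size must be a
// multiple of 512.
func growVolume(api *instance.API, volumeID string) error {
	size := scw.Size(50 * scw.GB)
	_, err := api.UpdateVolume(&instance.UpdateVolumeRequest{
		VolumeID: volumeID,
		Size:     &size,
	})
	return err
}
```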
func (s *API) ListSecurityGroups(req *ListSecurityGroupsRequest, opts ...scw.RequestOption) (*ListSecurityGroupsResponse, error) { var err error @@ -3568,11 +5138,6 @@ func (s *API) ListSecurityGroups(req *ListSecurityGroupsRequest, opts ...scw.Req req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "name", req.Name) parameter.AddToQuery(query, "organization", req.Organization) @@ -3589,10 +5154,9 @@ func (s *API) ListSecurityGroups(req *ListSecurityGroupsRequest, opts ...scw.Req } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups", + Query: query, } var resp ListSecurityGroupsResponse @@ -3604,62 +5168,25 @@ func (s *API) ListSecurityGroups(req *ListSecurityGroupsRequest, opts ...scw.Req return &resp, nil } -type CreateSecurityGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Name: name of the security group. - Name string `json:"name,omitempty"` - // Description: description of the security group. - Description string `json:"description,omitempty"` - // Deprecated: Organization: organization ID the security group belongs to. - // Precisely one of Organization, Project must be set. - Organization *string `json:"organization,omitempty"` - // Project: project ID the security group belong to. - // Precisely one of Organization, Project must be set. - Project *string `json:"project,omitempty"` - // Tags: tags of the security group. - Tags []string `json:"tags,omitempty"` - // Deprecated: OrganizationDefault: defines whether this security group becomes the default security group for new Instances. - // Default value: false - // Precisely one of OrganizationDefault, ProjectDefault must be set. - OrganizationDefault *bool `json:"organization_default,omitempty"` - // ProjectDefault: whether this security group becomes the default security group for new Instances. - // Default value: false - // Precisely one of OrganizationDefault, ProjectDefault must be set. - ProjectDefault *bool `json:"project_default,omitempty"` - // Stateful: whether the security group is stateful or not. - // Default value: false - Stateful bool `json:"stateful,omitempty"` - // InboundDefaultPolicy: default policy for inbound rules. - // Default value: accept - InboundDefaultPolicy SecurityGroupPolicy `json:"inbound_default_policy"` - // OutboundDefaultPolicy: default policy for outbound rules. - // Default value: accept - OutboundDefaultPolicy SecurityGroupPolicy `json:"outbound_default_policy"` - // EnableDefaultSecurity: true to block SMTP on IPv4 and IPv6. This feature is read only, please open a support ticket if you need to make it configurable. - EnableDefaultSecurity *bool `json:"enable_default_security,omitempty"` -} - -// CreateSecurityGroup: create a security group. -// Create a security group with a specified name and description. +// CreateSecurityGroup: Create a security group with a specified name and description. 
func (s *API) CreateSecurityGroup(req *CreateSecurityGroupRequest, opts ...scw.RequestOption) (*CreateSecurityGroupResponse, error) { var err error - defaultProject, exist := s.client.GetDefaultProjectID() - if exist && req.Organization == nil && req.Project == nil { - req.Project = &defaultProject - } - - defaultOrganization, exist := s.client.GetDefaultOrganizationID() - if exist && req.Organization == nil && req.Project == nil { - req.Organization = &defaultOrganization - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProject, exist := s.client.GetDefaultProjectID() + if exist && req.Project == nil && req.Organization == nil { + req.Project = &defaultProject + } + + defaultOrganization, exist := s.client.GetDefaultOrganizationID() + if exist && req.Project == nil && req.Organization == nil { + req.Organization = &defaultOrganization + } + if req.Name == "" { req.Name = namegenerator.GetRandomName("sg") } @@ -3669,9 +5196,8 @@ func (s *API) CreateSecurityGroup(req *CreateSecurityGroupRequest, opts ...scw.R } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups", } err = scwReq.SetBody(req) @@ -3688,15 +5214,7 @@ func (s *API) CreateSecurityGroup(req *CreateSecurityGroupRequest, opts ...scw.R return &resp, nil } -type GetSecurityGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SecurityGroupID: UUID of the security group you want to get. - SecurityGroupID string `json:"-"` -} - -// GetSecurityGroup: get a security group. -// Get the details of a security group with the specified ID. +// GetSecurityGroup: Get the details of a security group with the specified ID. func (s *API) GetSecurityGroup(req *GetSecurityGroupRequest, opts ...scw.RequestOption) (*GetSecurityGroupResponse, error) { var err error @@ -3714,9 +5232,8 @@ func (s *API) GetSecurityGroup(req *GetSecurityGroupRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "", } var resp GetSecurityGroupResponse @@ -3728,15 +5245,7 @@ func (s *API) GetSecurityGroup(req *GetSecurityGroupRequest, opts ...scw.Request return &resp, nil } -type DeleteSecurityGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SecurityGroupID: UUID of the security group you want to delete. - SecurityGroupID string `json:"-"` -} - -// DeleteSecurityGroup: delete a security group. -// Delete a security group with the specified ID. +// DeleteSecurityGroup: Delete a security group with the specified ID. 
func (s *API) DeleteSecurityGroup(req *DeleteSecurityGroupRequest, opts ...scw.RequestOption) error { var err error @@ -3754,9 +5263,8 @@ func (s *API) DeleteSecurityGroup(req *DeleteSecurityGroupRequest, opts ...scw.R } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -3766,51 +5274,13 @@ func (s *API) DeleteSecurityGroup(req *DeleteSecurityGroupRequest, opts ...scw.R return nil } -type setSecurityGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ID: ID of the security group (will be ignored). - ID string `json:"-"` - // Name: name of the security group. - Name string `json:"name"` - // Tags: tags of the security group. - Tags *[]string `json:"tags"` - // CreationDate: creation date of the security group (will be ignored). - CreationDate *time.Time `json:"creation_date"` - // ModificationDate: modification date of the security group (will be ignored). - ModificationDate *time.Time `json:"modification_date"` - // Description: description of the security group. - Description string `json:"description"` - // EnableDefaultSecurity: true to block SMTP on IPv4 and IPv6. This feature is read only, please open a support ticket if you need to make it configurable. - EnableDefaultSecurity bool `json:"enable_default_security"` - // InboundDefaultPolicy: default inbound policy. - // Default value: accept - InboundDefaultPolicy SecurityGroupPolicy `json:"inbound_default_policy"` - // OutboundDefaultPolicy: default outbound policy. - // Default value: accept - OutboundDefaultPolicy SecurityGroupPolicy `json:"outbound_default_policy"` - // Organization: security groups Organization ID. - Organization string `json:"organization"` - // Project: security group Project ID. - Project string `json:"project"` - // Deprecated: OrganizationDefault: please use project_default instead. - OrganizationDefault *bool `json:"organization_default"` - // ProjectDefault: true use this security group for future Instances created in this project. - ProjectDefault bool `json:"project_default"` - // Servers: instances attached to this security group. - Servers []*ServerSummary `json:"servers"` - // Stateful: true to set the security group as stateful. - Stateful bool `json:"stateful"` -} - -// setSecurityGroup: update a security group. -// Replace all security group properties with a security group message. +// setSecurityGroup: Replace all security group properties with a security group message. 
func (s *API) setSecurityGroup(req *setSecurityGroupRequest, opts ...scw.RequestOption) (*setSecurityGroupResponse, error) { var err error - if req.Project == "" { - defaultProject, _ := s.client.GetDefaultProjectID() - req.Project = defaultProject + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone } if req.Organization == "" { @@ -3818,9 +5288,9 @@ func (s *API) setSecurityGroup(req *setSecurityGroupRequest, opts ...scw.Request req.Organization = defaultOrganization } - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone + if req.Project == "" { + defaultProject, _ := s.client.GetDefaultProjectID() + req.Project = defaultProject } if fmt.Sprint(req.Zone) == "" { @@ -3832,9 +5302,8 @@ func (s *API) setSecurityGroup(req *setSecurityGroupRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.ID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.ID) + "", } err = scwReq.SetBody(req) @@ -3851,13 +5320,43 @@ func (s *API) setSecurityGroup(req *setSecurityGroupRequest, opts ...scw.Request return &resp, nil } -type ListDefaultSecurityGroupRulesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` +// UpdateSecurityGroup: Update the properties of security group. +func (s *API) UpdateSecurityGroup(req *UpdateSecurityGroupRequest, opts ...scw.RequestOption) (*UpdateSecurityGroupResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone + } + + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.SecurityGroupID) == "" { + return nil, errors.New("field SecurityGroupID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp UpdateSecurityGroupResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil } -// ListDefaultSecurityGroupRules: get default rules. -// Lists the default rules applied to all the security groups. +// ListDefaultSecurityGroupRules: Lists the default rules applied to all the security groups. func (s *API) ListDefaultSecurityGroupRules(req *ListDefaultSecurityGroupRulesRequest, opts ...scw.RequestOption) (*ListSecurityGroupRulesResponse, error) { var err error @@ -3871,9 +5370,8 @@ func (s *API) ListDefaultSecurityGroupRules(req *ListDefaultSecurityGroupRulesRe } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/default/rules", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/default/rules", } var resp ListSecurityGroupRulesResponse @@ -3885,20 +5383,7 @@ func (s *API) ListDefaultSecurityGroupRules(req *ListDefaultSecurityGroupRulesRe return &resp, nil } -type ListSecurityGroupRulesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SecurityGroupID: UUID of the security group. 
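`UpdateSecurityGroup` is a new public PATCH endpoint alongside the existing unexported `setSecurityGroup`. Only `Zone` and `SecurityGroupID` are visible in this hunk, so the sketch below assumes a `*string` `Name` field, by analogy with the other `Update*` requests.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// renameSecurityGroup patches a single property of a security group.
func renameSecurityGroup(api *instance.API, securityGroupID, newName string) error {
	_, err := api.UpdateSecurityGroup(&instance.UpdateSecurityGroupRequest{
		SecurityGroupID: securityGroupID,
		Name:            &newName, // assumed field
	})
	return err
}
```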
- SecurityGroupID string `json:"-"` - // PerPage: a positive integer lower or equal to 100 to select the number of items to return. - // Default value: 50 - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to return. - Page *int32 `json:"-"` -} - -// ListSecurityGroupRules: list rules. -// List the rules of the a specified security group ID. +// ListSecurityGroupRules: List the rules of the a specified security group ID. func (s *API) ListSecurityGroupRules(req *ListSecurityGroupRulesRequest, opts ...scw.RequestOption) (*ListSecurityGroupRulesResponse, error) { var err error @@ -3907,11 +5392,6 @@ func (s *API) ListSecurityGroupRules(req *ListSecurityGroupRulesRequest, opts .. req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "per_page", req.PerPage) parameter.AddToQuery(query, "page", req.Page) @@ -3925,10 +5405,9 @@ func (s *API) ListSecurityGroupRules(req *ListSecurityGroupRulesRequest, opts .. } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules", + Query: query, } var resp ListSecurityGroupRulesResponse @@ -3940,31 +5419,7 @@ func (s *API) ListSecurityGroupRules(req *ListSecurityGroupRulesRequest, opts .. return &resp, nil } -type CreateSecurityGroupRuleRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SecurityGroupID: UUID of the security group. - SecurityGroupID string `json:"-"` - // Protocol: default value: TCP - Protocol SecurityGroupRuleProtocol `json:"protocol"` - // Direction: default value: inbound - Direction SecurityGroupRuleDirection `json:"direction"` - // Action: default value: accept - Action SecurityGroupRuleAction `json:"action"` - - IPRange scw.IPNet `json:"ip_range,omitempty"` - // DestPortFrom: beginning of the range of ports to apply this rule to (inclusive). - DestPortFrom *uint32 `json:"dest_port_from,omitempty"` - // DestPortTo: end of the range of ports to apply this rule to (inclusive). - DestPortTo *uint32 `json:"dest_port_to,omitempty"` - // Position: position of this rule in the security group rules list. - Position uint32 `json:"position,omitempty"` - // Editable: indicates if this rule is editable (will be ignored). - Editable bool `json:"editable,omitempty"` -} - -// CreateSecurityGroupRule: create rule. -// Create a rule in the specified security group ID. +// CreateSecurityGroupRule: Create a rule in the specified security group ID. 
func (s *API) CreateSecurityGroupRule(req *CreateSecurityGroupRuleRequest, opts ...scw.RequestOption) (*CreateSecurityGroupRuleResponse, error) { var err error @@ -3982,9 +5437,8 @@ func (s *API) CreateSecurityGroupRule(req *CreateSecurityGroupRuleRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules", } err = scwReq.SetBody(req) @@ -4001,17 +5455,7 @@ func (s *API) CreateSecurityGroupRule(req *CreateSecurityGroupRuleRequest, opts return &resp, nil } -type SetSecurityGroupRulesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SecurityGroupID: UUID of the security group to update the rules on. - SecurityGroupID string `json:"-"` - // Rules: list of rules to update in the security group. - Rules []*SetSecurityGroupRulesRequestRule `json:"rules"` -} - -// SetSecurityGroupRules: update all the rules of a security group. -// Replaces the existing rules of the security group with the rules provided. This endpoint supports the update of existing rules, creation of new rules and deletion of existing rules when they are not passed in the request. +// SetSecurityGroupRules: Replaces the existing rules of the security group with the rules provided. This endpoint supports the update of existing rules, creation of new rules and deletion of existing rules when they are not passed in the request. func (s *API) SetSecurityGroupRules(req *SetSecurityGroupRulesRequest, opts ...scw.RequestOption) (*SetSecurityGroupRulesResponse, error) { var err error @@ -4029,9 +5473,8 @@ func (s *API) SetSecurityGroupRules(req *SetSecurityGroupRulesRequest, opts ...s } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules", - Headers: http.Header{}, + Method: "PUT", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules", } err = scwReq.SetBody(req) @@ -4048,17 +5491,7 @@ func (s *API) SetSecurityGroupRules(req *SetSecurityGroupRulesRequest, opts ...s return &resp, nil } -type DeleteSecurityGroupRuleRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - SecurityGroupID string `json:"-"` - - SecurityGroupRuleID string `json:"-"` -} - -// DeleteSecurityGroupRule: delete rule. -// Delete a security group rule with the specified ID. +// DeleteSecurityGroupRule: Delete a security group rule with the specified ID. func (s *API) DeleteSecurityGroupRule(req *DeleteSecurityGroupRuleRequest, opts ...scw.RequestOption) error { var err error @@ -4080,9 +5513,8 @@ func (s *API) DeleteSecurityGroupRule(req *DeleteSecurityGroupRuleRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules/" + fmt.Sprint(req.SecurityGroupRuleID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules/" + fmt.Sprint(req.SecurityGroupRuleID) + "", } err = s.client.Do(scwReq, nil, opts...) 
@@ -4092,17 +5524,7 @@ func (s *API) DeleteSecurityGroupRule(req *DeleteSecurityGroupRuleRequest, opts return nil } -type GetSecurityGroupRuleRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - SecurityGroupID string `json:"-"` - - SecurityGroupRuleID string `json:"-"` -} - -// GetSecurityGroupRule: get rule. -// Get details of a security group rule with the specified ID. +// GetSecurityGroupRule: Get details of a security group rule with the specified ID. func (s *API) GetSecurityGroupRule(req *GetSecurityGroupRuleRequest, opts ...scw.RequestOption) (*GetSecurityGroupRuleResponse, error) { var err error @@ -4124,9 +5546,8 @@ func (s *API) GetSecurityGroupRule(req *GetSecurityGroupRuleRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules/" + fmt.Sprint(req.SecurityGroupRuleID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules/" + fmt.Sprint(req.SecurityGroupRuleID) + "", } var resp GetSecurityGroupRuleResponse @@ -4138,35 +5559,7 @@ func (s *API) GetSecurityGroupRule(req *GetSecurityGroupRuleRequest, opts ...scw return &resp, nil } -type setSecurityGroupRuleRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - SecurityGroupID string `json:"-"` - - SecurityGroupRuleID string `json:"-"` - - ID string `json:"id"` - // Protocol: default value: TCP - Protocol SecurityGroupRuleProtocol `json:"protocol"` - // Direction: default value: inbound - Direction SecurityGroupRuleDirection `json:"direction"` - // Action: default value: accept - Action SecurityGroupRuleAction `json:"action"` - - IPRange scw.IPNet `json:"ip_range"` - - DestPortFrom *uint32 `json:"dest_port_from"` - - DestPortTo *uint32 `json:"dest_port_to"` - - Position uint32 `json:"position"` - - Editable bool `json:"editable"` -} - -// setSecurityGroupRule: update security group rule. -// Update the rule of a specified security group ID. +// setSecurityGroupRule: Replace all the properties of a rule from a specified security group. func (s *API) setSecurityGroupRule(req *setSecurityGroupRuleRequest, opts ...scw.RequestOption) (*setSecurityGroupRuleResponse, error) { var err error @@ -4188,9 +5581,8 @@ func (s *API) setSecurityGroupRule(req *setSecurityGroupRuleRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules/" + fmt.Sprint(req.SecurityGroupRuleID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules/" + fmt.Sprint(req.SecurityGroupRuleID) + "", } err = scwReq.SetBody(req) @@ -4207,27 +5599,8 @@ func (s *API) setSecurityGroupRule(req *setSecurityGroupRuleRequest, opts ...scw return &resp, nil } -type ListPlacementGroupsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PerPage: a positive integer lower or equal to 100 to select the number of items to return. - // Default value: 50 - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to return. 
- Page *int32 `json:"-"` - // Organization: list only placement groups of this Organization ID. - Organization *string `json:"-"` - // Project: list only placement groups of this Project ID. - Project *string `json:"-"` - // Tags: list placement groups with these exact tags (to filter with several tags, use commas to separate them). - Tags []string `json:"-"` - // Name: filter placement groups by name (for eg. "cluster1" will return "cluster100" and "cluster1" but not "foo"). - Name *string `json:"-"` -} - -// ListPlacementGroups: list placement groups. -// List all placement groups in a specified Availability Zone. -func (s *API) ListPlacementGroups(req *ListPlacementGroupsRequest, opts ...scw.RequestOption) (*ListPlacementGroupsResponse, error) { +// UpdateSecurityGroupRule: Update the properties of a rule from a specified security group. +func (s *API) UpdateSecurityGroupRule(req *UpdateSecurityGroupRuleRequest, opts ...scw.RequestOption) (*UpdateSecurityGroupRuleResponse, error) { var err error if req.Zone == "" { @@ -4235,9 +5608,44 @@ func (s *API) ListPlacementGroups(req *ListPlacementGroupsRequest, opts ...scw.R req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage + if fmt.Sprint(req.Zone) == "" { + return nil, errors.New("field Zone cannot be empty in request") + } + + if fmt.Sprint(req.SecurityGroupID) == "" { + return nil, errors.New("field SecurityGroupID cannot be empty in request") + } + + if fmt.Sprint(req.SecurityGroupRuleID) == "" { + return nil, errors.New("field SecurityGroupRuleID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/security_groups/" + fmt.Sprint(req.SecurityGroupID) + "/rules/" + fmt.Sprint(req.SecurityGroupRuleID) + "", + } + + err = scwReq.SetBody(req) + if err != nil { + return nil, err + } + + var resp UpdateSecurityGroupRuleResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListPlacementGroups: List all placement groups in a specified Availability Zone. +func (s *API) ListPlacementGroups(req *ListPlacementGroupsRequest, opts ...scw.RequestOption) (*ListPlacementGroupsResponse, error) { + var err error + + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone } query := url.Values{} @@ -4255,10 +5663,9 @@ func (s *API) ListPlacementGroups(req *ListPlacementGroupsRequest, opts ...scw.R } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups", + Query: query, } var resp ListPlacementGroupsResponse @@ -4270,47 +5677,25 @@ func (s *API) ListPlacementGroups(req *ListPlacementGroupsRequest, opts ...scw.R return &resp, nil } -type CreatePlacementGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Name: name of the placement group. - Name string `json:"name,omitempty"` - // Deprecated: Organization: organization ID of the placement group. - // Precisely one of Organization, Project must be set. - Organization *string `json:"organization,omitempty"` - // Project: project ID of the placement group. - // Precisely one of Organization, Project must be set. 
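`UpdateSecurityGroupRule` is the rule-level counterpart of the new PATCH endpoints, sitting next to the existing `setSecurityGroupRule` PUT. The three IDs match the generated method above; in the sketch, `DestPortFrom`/`DestPortTo` are assumed to remain `*uint32`, as in the `CreateSecurityGroupRuleRequest` that was moved out of this file.

```go
package example

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
)

// widenRulePortRange patches only the destination port range of one rule.
func widenRulePortRange(api *instance.API, sgID, ruleID string) error {
	from, to := uint32(8000), uint32(8100)
	_, err := api.UpdateSecurityGroupRule(&instance.UpdateSecurityGroupRuleRequest{
		SecurityGroupID:     sgID,
		SecurityGroupRuleID: ruleID,
		DestPortFrom:        &from, // assumed field
		DestPortTo:          &to,   // assumed field
	})
	return err
}
```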
- Project *string `json:"project,omitempty"` - // Tags: tags of the placement group. - Tags []string `json:"tags,omitempty"` - // PolicyMode: operating mode of the placement group. - // Default value: optional - PolicyMode PlacementGroupPolicyMode `json:"policy_mode"` - // PolicyType: policy type of the placement group. - // Default value: max_availability - PolicyType PlacementGroupPolicyType `json:"policy_type"` -} - -// CreatePlacementGroup: create a placement group. -// Create a new placement group in a specified Availability Zone. +// CreatePlacementGroup: Create a new placement group in a specified Availability Zone. func (s *API) CreatePlacementGroup(req *CreatePlacementGroupRequest, opts ...scw.RequestOption) (*CreatePlacementGroupResponse, error) { var err error - defaultProject, exist := s.client.GetDefaultProjectID() - if exist && req.Organization == nil && req.Project == nil { - req.Project = &defaultProject - } - - defaultOrganization, exist := s.client.GetDefaultOrganizationID() - if exist && req.Organization == nil && req.Project == nil { - req.Organization = &defaultOrganization - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProject, exist := s.client.GetDefaultProjectID() + if exist && req.Project == nil && req.Organization == nil { + req.Project = &defaultProject + } + + defaultOrganization, exist := s.client.GetDefaultOrganizationID() + if exist && req.Project == nil && req.Organization == nil { + req.Organization = &defaultOrganization + } + if req.Name == "" { req.Name = namegenerator.GetRandomName("pg") } @@ -4320,9 +5705,8 @@ func (s *API) CreatePlacementGroup(req *CreatePlacementGroupRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups", } err = scwReq.SetBody(req) @@ -4339,15 +5723,7 @@ func (s *API) CreatePlacementGroup(req *CreatePlacementGroupRequest, opts ...scw return &resp, nil } -type GetPlacementGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PlacementGroupID: UUID of the placement group you want to get. - PlacementGroupID string `json:"-"` -} - -// GetPlacementGroup: get a placement group. -// Get the specified placement group. +// GetPlacementGroup: Get the specified placement group. func (s *API) GetPlacementGroup(req *GetPlacementGroupRequest, opts ...scw.RequestOption) (*GetPlacementGroupResponse, error) { var err error @@ -4365,9 +5741,8 @@ func (s *API) GetPlacementGroup(req *GetPlacementGroupRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "", } var resp GetPlacementGroupResponse @@ -4379,33 +5754,13 @@ func (s *API) GetPlacementGroup(req *GetPlacementGroupRequest, opts ...scw.Reque return &resp, nil } -type SetPlacementGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. 
- Zone scw.Zone `json:"-"` - - PlacementGroupID string `json:"-"` - - Name string `json:"name"` - - Organization string `json:"organization"` - // PolicyMode: default value: optional - PolicyMode PlacementGroupPolicyMode `json:"policy_mode"` - // PolicyType: default value: max_availability - PolicyType PlacementGroupPolicyType `json:"policy_type"` - - Project string `json:"project"` - - Tags *[]string `json:"tags"` -} - -// SetPlacementGroup: set placement group. -// Set all parameters of the specified placement group. +// SetPlacementGroup: Set all parameters of the specified placement group. func (s *API) SetPlacementGroup(req *SetPlacementGroupRequest, opts ...scw.RequestOption) (*SetPlacementGroupResponse, error) { var err error - if req.Project == "" { - defaultProject, _ := s.client.GetDefaultProjectID() - req.Project = defaultProject + if req.Zone == "" { + defaultZone, _ := s.client.GetDefaultZone() + req.Zone = defaultZone } if req.Organization == "" { @@ -4413,9 +5768,9 @@ func (s *API) SetPlacementGroup(req *SetPlacementGroupRequest, opts ...scw.Reque req.Organization = defaultOrganization } - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone + if req.Project == "" { + defaultProject, _ := s.client.GetDefaultProjectID() + req.Project = defaultProject } if fmt.Sprint(req.Zone) == "" { @@ -4427,9 +5782,8 @@ func (s *API) SetPlacementGroup(req *SetPlacementGroupRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "", } err = scwReq.SetBody(req) @@ -4446,25 +5800,7 @@ func (s *API) SetPlacementGroup(req *SetPlacementGroupRequest, opts ...scw.Reque return &resp, nil } -type UpdatePlacementGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PlacementGroupID: UUID of the placement group. - PlacementGroupID string `json:"-"` - // Name: name of the placement group. - Name *string `json:"name,omitempty"` - // Tags: tags of the placement group. - Tags *[]string `json:"tags,omitempty"` - // PolicyMode: operating mode of the placement group. - // Default value: optional - PolicyMode *PlacementGroupPolicyMode `json:"policy_mode,omitempty"` - // PolicyType: policy type of the placement group. - // Default value: max_availability - PolicyType *PlacementGroupPolicyType `json:"policy_type,omitempty"` -} - -// UpdatePlacementGroup: update a placement group. -// Update one or more parameter of the specified placement group. +// UpdatePlacementGroup: Update one or more parameter of the specified placement group. 
func (s *API) UpdatePlacementGroup(req *UpdatePlacementGroupRequest, opts ...scw.RequestOption) (*UpdatePlacementGroupResponse, error) { var err error @@ -4482,9 +5818,8 @@ func (s *API) UpdatePlacementGroup(req *UpdatePlacementGroupRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "", } err = scwReq.SetBody(req) @@ -4501,14 +5836,7 @@ func (s *API) UpdatePlacementGroup(req *UpdatePlacementGroupRequest, opts ...scw return &resp, nil } -type DeletePlacementGroupRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PlacementGroupID: UUID of the placement group you want to delete. - PlacementGroupID string `json:"-"` -} - -// DeletePlacementGroup: delete the specified placement group. +// DeletePlacementGroup: Delete the specified placement group. func (s *API) DeletePlacementGroup(req *DeletePlacementGroupRequest, opts ...scw.RequestOption) error { var err error @@ -4526,9 +5854,8 @@ func (s *API) DeletePlacementGroup(req *DeletePlacementGroupRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -4538,15 +5865,7 @@ func (s *API) DeletePlacementGroup(req *DeletePlacementGroupRequest, opts ...scw return nil } -type GetPlacementGroupServersRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PlacementGroupID: UUID of the placement group you want to get. - PlacementGroupID string `json:"-"` -} - -// GetPlacementGroupServers: get placement group servers. -// Get all Instances belonging to the specified placement group. +// GetPlacementGroupServers: Get all Instances belonging to the specified placement group. func (s *API) GetPlacementGroupServers(req *GetPlacementGroupServersRequest, opts ...scw.RequestOption) (*GetPlacementGroupServersResponse, error) { var err error @@ -4564,9 +5883,8 @@ func (s *API) GetPlacementGroupServers(req *GetPlacementGroupServersRequest, opt } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "/servers", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "/servers", } var resp GetPlacementGroupServersResponse @@ -4578,17 +5896,7 @@ func (s *API) GetPlacementGroupServers(req *GetPlacementGroupServersRequest, opt return &resp, nil } -type SetPlacementGroupServersRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PlacementGroupID: UUID of the placement group you want to set. - PlacementGroupID string `json:"-"` - // Servers: an array of the Instances' UUIDs you want to configure. - Servers []string `json:"servers"` -} - -// SetPlacementGroupServers: set placement group servers. 
-// Set all Instances belonging to the specified placement group. +// SetPlacementGroupServers: Set all Instances belonging to the specified placement group. func (s *API) SetPlacementGroupServers(req *SetPlacementGroupServersRequest, opts ...scw.RequestOption) (*SetPlacementGroupServersResponse, error) { var err error @@ -4606,9 +5914,8 @@ func (s *API) SetPlacementGroupServers(req *SetPlacementGroupServersRequest, opt } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "/servers", - Headers: http.Header{}, + Method: "PUT", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "/servers", } err = scwReq.SetBody(req) @@ -4625,17 +5932,7 @@ func (s *API) SetPlacementGroupServers(req *SetPlacementGroupServersRequest, opt return &resp, nil } -type UpdatePlacementGroupServersRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // PlacementGroupID: UUID of the placement group you want to update. - PlacementGroupID string `json:"-"` - // Servers: an array of the Instances' UUIDs you want to configure. - Servers []string `json:"servers,omitempty"` -} - -// UpdatePlacementGroupServers: update placement group servers. -// Update all Instances belonging to the specified placement group. +// UpdatePlacementGroupServers: Update all Instances belonging to the specified placement group. func (s *API) UpdatePlacementGroupServers(req *UpdatePlacementGroupServersRequest, opts ...scw.RequestOption) (*UpdatePlacementGroupServersResponse, error) { var err error @@ -4653,9 +5950,8 @@ func (s *API) UpdatePlacementGroupServers(req *UpdatePlacementGroupServersReques } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "/servers", - Headers: http.Header{}, + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/placement_groups/" + fmt.Sprint(req.PlacementGroupID) + "/servers", } err = scwReq.SetBody(req) @@ -4672,26 +5968,7 @@ func (s *API) UpdatePlacementGroupServers(req *UpdatePlacementGroupServersReques return &resp, nil } -type ListIPsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Project: project ID in which the IPs are reserved. - Project *string `json:"-"` - // Organization: organization ID in which the IPs are reserved. - Organization *string `json:"-"` - // Tags: filter IPs with these exact tags (to filter with several tags, use commas to separate them). - Tags []string `json:"-"` - // Name: filter on the IP address (Works as a LIKE operation on the IP address). - Name *string `json:"-"` - // PerPage: a positive integer lower or equal to 100 to select the number of items to return. - // Default value: 50 - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to return. - Page *int32 `json:"-"` -} - -// ListIPs: list all flexible IPs. -// List all flexible IPs in a specified zone. +// ListIPs: List all flexible IPs in a specified zone. 
func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsResponse, error) { var err error @@ -4700,11 +5977,6 @@ func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsR req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "project", req.Project) parameter.AddToQuery(query, "organization", req.Organization) @@ -4714,16 +5986,16 @@ func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsR parameter.AddToQuery(query, "name", req.Name) parameter.AddToQuery(query, "per_page", req.PerPage) parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "type", req.Type) if fmt.Sprint(req.Zone) == "" { return nil, errors.New("field Zone cannot be empty in request") } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips", + Query: query, } var resp ListIPsResponse @@ -4735,52 +6007,32 @@ func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsR return &resp, nil } -type CreateIPRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Deprecated: Organization: organization ID in which the IP is reserved. - // Precisely one of Organization, Project must be set. - Organization *string `json:"organization,omitempty"` - // Project: project ID in which the IP is reserved. - // Precisely one of Organization, Project must be set. - Project *string `json:"project,omitempty"` - // Tags: tags of the IP. - Tags []string `json:"tags,omitempty"` - // Server: UUID of the Instance you want to attach the IP to. - Server *string `json:"server,omitempty"` - // Type: IP type to reserve (either 'nat', 'routed_ipv4' or 'routed_ipv6'). - // Default value: unknown_iptype - Type IPType `json:"type"` -} - -// CreateIP: reserve a flexible IP. -// Reserve a flexible IP and attach it to the specified Instance. +// CreateIP: Reserve a flexible IP and attach it to the specified Instance. 
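For illustration only, a minimal sketch of reserving a routed IPv4 flexible IP through the CreateIP call documented above, assuming an already-constructed *instance.API client; the helper name, zone and server ID are placeholders, and the "routed_ipv4" string is taken from the request's own doc comment (a generated IPType constant may also exist):

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// reserveRoutedIPv4 reserves a routed IPv4 flexible IP and attaches it to the given Instance.
// Project/Organization are omitted so the client's configured defaults apply.
func reserveRoutedIPv4(api *instance.API, zone scw.Zone, serverID string) (*instance.CreateIPResponse, error) {
	return api.CreateIP(&instance.CreateIPRequest{
		Zone:   zone,
		Server: scw.StringPtr(serverID),
		Type:   instance.IPType("routed_ipv4"), // value taken from the field's doc comment
	})
}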
func (s *API) CreateIP(req *CreateIPRequest, opts ...scw.RequestOption) (*CreateIPResponse, error) { var err error - defaultProject, exist := s.client.GetDefaultProjectID() - if exist && req.Organization == nil && req.Project == nil { - req.Project = &defaultProject - } - - defaultOrganization, exist := s.client.GetDefaultOrganizationID() - if exist && req.Organization == nil && req.Project == nil { - req.Organization = &defaultOrganization - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProject, exist := s.client.GetDefaultProjectID() + if exist && req.Project == nil && req.Organization == nil { + req.Project = &defaultProject + } + + defaultOrganization, exist := s.client.GetDefaultOrganizationID() + if exist && req.Project == nil && req.Organization == nil { + req.Organization = &defaultOrganization + } + if fmt.Sprint(req.Zone) == "" { return nil, errors.New("field Zone cannot be empty in request") } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips", } err = scwReq.SetBody(req) @@ -4797,15 +6049,7 @@ func (s *API) CreateIP(req *CreateIPRequest, opts ...scw.RequestOption) (*Create return &resp, nil } -type GetIPRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // IP: IP ID or address to get. - IP string `json:"-"` -} - -// GetIP: get a flexible IP. -// Get details of an IP with the specified ID or address. +// GetIP: Get details of an IP with the specified ID or address. func (s *API) GetIP(req *GetIPRequest, opts ...scw.RequestOption) (*GetIPResponse, error) { var err error @@ -4823,9 +6067,8 @@ func (s *API) GetIP(req *GetIPRequest, opts ...scw.RequestOption) (*GetIPRespons } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IP) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IP) + "", } var resp GetIPResponse @@ -4837,24 +6080,7 @@ func (s *API) GetIP(req *GetIPRequest, opts ...scw.RequestOption) (*GetIPRespons return &resp, nil } -type UpdateIPRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // IP: IP ID or IP address. - IP string `json:"-"` - // Reverse: reverse domain name. - Reverse *NullableStringValue `json:"reverse,omitempty"` - // Type: convert a 'nat' IP to a 'routed_ipv4'. - // Default value: unknown_iptype - Type IPType `json:"type"` - // Tags: an array of keywords you want to tag this IP with. - Tags *[]string `json:"tags,omitempty"` - - Server *NullableStringValue `json:"server,omitempty"` -} - -// UpdateIP: update a flexible IP. -// Update a flexible IP in the specified zone with the specified ID. +// UpdateIP: Update a flexible IP in the specified zone with the specified ID. 
func (s *API) UpdateIP(req *UpdateIPRequest, opts ...scw.RequestOption) (*UpdateIPResponse, error) { var err error @@ -4872,9 +6098,8 @@ func (s *API) UpdateIP(req *UpdateIPRequest, opts ...scw.RequestOption) (*Update } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IP) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IP) + "", } err = scwReq.SetBody(req) @@ -4891,15 +6116,7 @@ func (s *API) UpdateIP(req *UpdateIPRequest, opts ...scw.RequestOption) (*Update return &resp, nil } -type DeleteIPRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // IP: ID or address of the IP to delete. - IP string `json:"-"` -} - -// DeleteIP: delete a flexible IP. -// Delete the IP with the specified ID. +// DeleteIP: Delete the IP with the specified ID. func (s *API) DeleteIP(req *DeleteIPRequest, opts ...scw.RequestOption) error { var err error @@ -4917,9 +6134,8 @@ func (s *API) DeleteIP(req *DeleteIPRequest, opts ...scw.RequestOption) error { } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IP) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IP) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -4929,22 +6145,7 @@ func (s *API) DeleteIP(req *DeleteIPRequest, opts ...scw.RequestOption) error { return nil } -type ListPrivateNICsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: instance to which the private NIC is attached. - ServerID string `json:"-"` - // Tags: private NIC tags. - Tags []string `json:"-"` - // PerPage: a positive integer lower or equal to 100 to select the number of items to return. - // Default value: 50 - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to return. - Page *int32 `json:"-"` -} - -// ListPrivateNICs: list all private NICs. -// List all private NICs of a specified Instance. +// ListPrivateNICs: List all private NICs of a specified Instance. func (s *API) ListPrivateNICs(req *ListPrivateNICsRequest, opts ...scw.RequestOption) (*ListPrivateNICsResponse, error) { var err error @@ -4953,11 +6154,6 @@ func (s *API) ListPrivateNICs(req *ListPrivateNICsRequest, opts ...scw.RequestOp req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} if len(req.Tags) != 0 { parameter.AddToQuery(query, "tags", strings.Join(req.Tags, ",")) @@ -4974,10 +6170,9 @@ func (s *API) ListPrivateNICs(req *ListPrivateNICsRequest, opts ...scw.RequestOp } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics", + Query: query, } var resp ListPrivateNICsResponse @@ -4989,20 +6184,7 @@ func (s *API) ListPrivateNICs(req *ListPrivateNICsRequest, opts ...scw.RequestOp return &resp, nil } -type CreatePrivateNICRequest struct { - // Zone: zone to target. 
If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: UUID of the Instance the private NIC will be attached to. - ServerID string `json:"-"` - // PrivateNetworkID: UUID of the private network where the private NIC will be attached. - PrivateNetworkID string `json:"private_network_id,omitempty"` - // Tags: private NIC tags. - Tags []string `json:"tags,omitempty"` - // IPIDs: ip_ids defined from IPAM. - IPIDs []string `json:"ip_ids,omitempty"` -} - -// CreatePrivateNIC: create a private NIC connecting an Instance to a Private Network. +// CreatePrivateNIC: Create a private NIC connecting an Instance to a Private Network. func (s *API) CreatePrivateNIC(req *CreatePrivateNICRequest, opts ...scw.RequestOption) (*CreatePrivateNICResponse, error) { var err error @@ -5020,9 +6202,8 @@ func (s *API) CreatePrivateNIC(req *CreatePrivateNICRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics", } err = scwReq.SetBody(req) @@ -5039,17 +6220,7 @@ func (s *API) CreatePrivateNIC(req *CreatePrivateNICRequest, opts ...scw.Request return &resp, nil } -type GetPrivateNICRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: instance to which the private NIC is attached. - ServerID string `json:"-"` - // PrivateNicID: private NIC unique ID. - PrivateNicID string `json:"-"` -} - -// GetPrivateNIC: get a private NIC. -// Get private NIC properties. +// GetPrivateNIC: Get private NIC properties. func (s *API) GetPrivateNIC(req *GetPrivateNICRequest, opts ...scw.RequestOption) (*GetPrivateNICResponse, error) { var err error @@ -5071,9 +6242,8 @@ func (s *API) GetPrivateNIC(req *GetPrivateNICRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics/" + fmt.Sprint(req.PrivateNicID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics/" + fmt.Sprint(req.PrivateNicID) + "", } var resp GetPrivateNICResponse @@ -5085,19 +6255,7 @@ func (s *API) GetPrivateNIC(req *GetPrivateNICRequest, opts ...scw.RequestOption return &resp, nil } -type UpdatePrivateNICRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: UUID of the Instance the private NIC will be attached to. - ServerID string `json:"-"` - // PrivateNicID: private NIC unique ID. - PrivateNicID string `json:"-"` - // Tags: tags used to select private NIC/s. - Tags *[]string `json:"tags,omitempty"` -} - -// UpdatePrivateNIC: update a private NIC. -// Update one or more parameter(s) of a specified private NIC. +// UpdatePrivateNIC: Update one or more parameter(s) of a specified private NIC. 
func (s *API) UpdatePrivateNIC(req *UpdatePrivateNICRequest, opts ...scw.RequestOption) (*PrivateNIC, error) { var err error @@ -5119,9 +6277,8 @@ func (s *API) UpdatePrivateNIC(req *UpdatePrivateNICRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics/" + fmt.Sprint(req.PrivateNicID) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics/" + fmt.Sprint(req.PrivateNicID) + "", } err = scwReq.SetBody(req) @@ -5138,16 +6295,7 @@ func (s *API) UpdatePrivateNIC(req *UpdatePrivateNICRequest, opts ...scw.Request return &resp, nil } -type DeletePrivateNICRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ServerID: instance to which the private NIC is attached. - ServerID string `json:"-"` - // PrivateNicID: private NIC unique ID. - PrivateNicID string `json:"-"` -} - -// DeletePrivateNIC: delete a private NIC. +// DeletePrivateNIC: Delete a private NIC. func (s *API) DeletePrivateNIC(req *DeletePrivateNICRequest, opts ...scw.RequestOption) error { var err error @@ -5169,9 +6317,8 @@ func (s *API) DeletePrivateNIC(req *DeletePrivateNICRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics/" + fmt.Sprint(req.PrivateNicID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/servers/" + fmt.Sprint(req.ServerID) + "/private_nics/" + fmt.Sprint(req.PrivateNicID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -5181,24 +6328,7 @@ func (s *API) DeletePrivateNIC(req *DeletePrivateNICRequest, opts ...scw.Request return nil } -type ListBootscriptsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - Arch *string `json:"-"` - - Title *string `json:"-"` - - Default *bool `json:"-"` - - Public *bool `json:"-"` - - PerPage *uint32 `json:"-"` - - Page *int32 `json:"-"` -} - -// Deprecated: ListBootscripts: list bootscripts. +// Deprecated: ListBootscripts: List bootscripts. func (s *API) ListBootscripts(req *ListBootscriptsRequest, opts ...scw.RequestOption) (*ListBootscriptsResponse, error) { var err error @@ -5207,11 +6337,6 @@ func (s *API) ListBootscripts(req *ListBootscriptsRequest, opts ...scw.RequestOp req.Zone = defaultZone } - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - query := url.Values{} parameter.AddToQuery(query, "arch", req.Arch) parameter.AddToQuery(query, "title", req.Title) @@ -5225,10 +6350,9 @@ func (s *API) ListBootscripts(req *ListBootscriptsRequest, opts ...scw.RequestOp } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/bootscripts", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/bootscripts", + Query: query, } var resp ListBootscriptsResponse @@ -5240,15 +6364,7 @@ func (s *API) ListBootscripts(req *ListBootscriptsRequest, opts ...scw.RequestOp return &resp, nil } -type GetBootscriptRequest struct { - // Zone: zone to target. 
If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - BootscriptID string `json:"-"` -} - -// Deprecated: GetBootscript: get bootscripts. -// Get details of a bootscript with the specified ID. +// Deprecated: GetBootscript: Get details of a bootscript with the specified ID. func (s *API) GetBootscript(req *GetBootscriptRequest, opts ...scw.RequestOption) (*GetBootscriptResponse, error) { var err error @@ -5266,9 +6382,8 @@ func (s *API) GetBootscript(req *GetBootscriptRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/bootscripts/" + fmt.Sprint(req.BootscriptID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/bootscripts/" + fmt.Sprint(req.BootscriptID) + "", } var resp GetBootscriptResponse @@ -5280,15 +6395,7 @@ func (s *API) GetBootscript(req *GetBootscriptRequest, opts ...scw.RequestOption return &resp, nil } -type GetDashboardRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - - Organization *string `json:"-"` - - Project *string `json:"-"` -} - +// GetDashboard: func (s *API) GetDashboard(req *GetDashboardRequest, opts ...scw.RequestOption) (*GetDashboardResponse, error) { var err error @@ -5306,10 +6413,9 @@ func (s *API) GetDashboard(req *GetDashboardRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/dashboard", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/dashboard", + Query: query, } var resp GetDashboardResponse @@ -5321,19 +6427,7 @@ func (s *API) GetDashboard(req *GetDashboardRequest, opts ...scw.RequestOption) return &resp, nil } -type PlanBlockMigrationRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // VolumeID: the volume for which the migration plan will be generated. - // Precisely one of SnapshotID, VolumeID must be set. - VolumeID *string `json:"volume_id,omitempty"` - // SnapshotID: the snapshot for which the migration plan will be generated. - // Precisely one of SnapshotID, VolumeID must be set. - SnapshotID *string `json:"snapshot_id,omitempty"` -} - -// PlanBlockMigration: get a volume or snapshot's migration plan. -// Given a volume or snapshot, returns the migration plan for a call to the RPC ApplyBlockMigration. This plan will include zero or one volume, and zero or more snapshots, which will need to be migrated together. This RPC does not perform the actual migration itself, ApplyBlockMigration must be used. The validation_key value returned by this call must be provided to the ApplyBlockMigration call to confirm that all resources listed in the plan should be migrated. +// PlanBlockMigration: Given a volume or snapshot, returns the migration plan for a call to the RPC ApplyBlockMigration. This plan will include zero or one volume, and zero or more snapshots, which will need to be migrated together. This RPC does not perform the actual migration itself, ApplyBlockMigration must be used. The validation_key value returned by this call must be provided to the ApplyBlockMigration call to confirm that all resources listed in the plan should be migrated. 
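A minimal sketch of the Plan → Apply flow the comment above describes, assuming an already-constructed *instance.API client; the helper name, zone handling and volume ID are placeholders, and the ValidationKey field name on MigrationPlan is an assumption inferred from the validation_key wording:

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// migrateVolumeToSBS generates a migration plan for one volume and immediately
// applies it, handing the plan's validation key back to ApplyBlockMigration.
func migrateVolumeToSBS(api *instance.API, zone scw.Zone, volumeID string) error {
	plan, err := api.PlanBlockMigration(&instance.PlanBlockMigrationRequest{
		Zone:     zone,
		VolumeID: scw.StringPtr(volumeID), // precisely one of VolumeID or SnapshotID must be set
	})
	if err != nil {
		return err
	}
	return api.ApplyBlockMigration(&instance.ApplyBlockMigrationRequest{
		Zone:          zone,
		VolumeID:      scw.StringPtr(volumeID),
		ValidationKey: plan.ValidationKey, // assumed field carrying the plan's validation_key
	})
}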
func (s *API) PlanBlockMigration(req *PlanBlockMigrationRequest, opts ...scw.RequestOption) (*MigrationPlan, error) { var err error @@ -5347,9 +6441,8 @@ func (s *API) PlanBlockMigration(req *PlanBlockMigrationRequest, opts ...scw.Req } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/block-migration/plan", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/block-migration/plan", } err = scwReq.SetBody(req) @@ -5366,21 +6459,7 @@ func (s *API) PlanBlockMigration(req *PlanBlockMigrationRequest, opts ...scw.Req return &resp, nil } -type ApplyBlockMigrationRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // VolumeID: the volume to migrate, along with potentially other resources, according to the migration plan generated with a call to PlanBlockMigration. - // Precisely one of SnapshotID, VolumeID must be set. - VolumeID *string `json:"volume_id,omitempty"` - // SnapshotID: the snapshot to migrate, along with potentially other resources, according to the migration plan generated with a call to PlanBlockMigration. - // Precisely one of SnapshotID, VolumeID must be set. - SnapshotID *string `json:"snapshot_id,omitempty"` - // ValidationKey: a value to be retrieved from a call to PlanBlockMigration, to confirm that the volume and/or snapshots specified in said plan should be migrated. - ValidationKey string `json:"validation_key,omitempty"` -} - -// ApplyBlockMigration: migrate a volume and/or snapshots to SBS (Scaleway Block Storage). -// To be used, this RPC must be preceded by a call to PlanBlockMigration. To migrate all resources mentioned in the MigrationPlan, the validation_key returned in the MigrationPlan must be provided. +// ApplyBlockMigration: To be used, this RPC must be preceded by a call to PlanBlockMigration. To migrate all resources mentioned in the MigrationPlan, the validation_key returned in the MigrationPlan must be provided. func (s *API) ApplyBlockMigration(req *ApplyBlockMigrationRequest, opts ...scw.RequestOption) error { var err error @@ -5394,9 +6473,8 @@ func (s *API) ApplyBlockMigration(req *ApplyBlockMigrationRequest, opts ...scw.R } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/block-migration/apply", - Headers: http.Header{}, + Method: "POST", + Path: "/instance/v1/zones/" + fmt.Sprint(req.Zone) + "/block-migration/apply", } err = scwReq.SetBody(req) @@ -5410,193 +6488,3 @@ func (s *API) ApplyBlockMigration(req *ApplyBlockMigrationRequest, opts ...scw.R } return nil } - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListServersResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListServersResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListServersResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Servers = append(r.Servers, results.Servers...) 
- r.TotalCount += uint32(len(results.Servers)) - return uint32(len(results.Servers)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListImagesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListImagesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListImagesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Images = append(r.Images, results.Images...) - r.TotalCount += uint32(len(results.Images)) - return uint32(len(results.Images)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListSnapshotsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListSnapshotsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListSnapshotsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Snapshots = append(r.Snapshots, results.Snapshots...) - r.TotalCount += uint32(len(results.Snapshots)) - return uint32(len(results.Snapshots)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListVolumesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListVolumesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListVolumesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Volumes = append(r.Volumes, results.Volumes...) - r.TotalCount += uint32(len(results.Volumes)) - return uint32(len(results.Volumes)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListSecurityGroupsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListSecurityGroupsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListSecurityGroupsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.SecurityGroups = append(r.SecurityGroups, results.SecurityGroups...) - r.TotalCount += uint32(len(results.SecurityGroups)) - return uint32(len(results.SecurityGroups)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListSecurityGroupRulesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListSecurityGroupRulesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListSecurityGroupRulesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Rules = append(r.Rules, results.Rules...) 
- r.TotalCount += uint32(len(results.Rules)) - return uint32(len(results.Rules)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListPlacementGroupsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListPlacementGroupsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListPlacementGroupsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.PlacementGroups = append(r.PlacementGroups, results.PlacementGroups...) - r.TotalCount += uint32(len(results.PlacementGroups)) - return uint32(len(results.PlacementGroups)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListIPsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListIPsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListIPsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.IPs = append(r.IPs, results.IPs...) - r.TotalCount += uint32(len(results.IPs)) - return uint32(len(results.IPs)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListPrivateNICsResponse) UnsafeGetTotalCount() uint64 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListPrivateNICsResponse) UnsafeAppend(res interface{}) (uint64, error) { - results, ok := res.(*ListPrivateNICsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.PrivateNics = append(r.PrivateNics, results.PrivateNics...) - r.TotalCount += uint64(len(results.PrivateNics)) - return uint64(len(results.PrivateNics)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListBootscriptsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListBootscriptsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListBootscriptsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Bootscripts = append(r.Bootscripts, results.Bootscripts...) 
- r.TotalCount += uint32(len(results.Bootscripts)) - return uint32(len(results.Bootscripts)), nil -} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_utils.go index c9173840e3..0d5401df98 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_utils.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/instance_utils.go @@ -109,10 +109,19 @@ type AttachVolumeResponse struct { func volumesToVolumeTemplates(volumes map[string]*VolumeServer) map[string]*VolumeServerTemplate { volumeTemplates := map[string]*VolumeServerTemplate{} for key, volume := range volumes { - volumeTemplates[key] = &VolumeServerTemplate{ - ID: &volume.ID, - Name: &volume.Name, + volumeTemplate := &VolumeServerTemplate{ + ID: &volume.ID, } + + if volume.Name != "" { + volumeTemplate.Name = &volume.Name + } + + if volume.VolumeType == VolumeServerVolumeTypeSbsVolume { + volumeTemplate.VolumeType = VolumeVolumeTypeSbsVolume + } + + volumeTemplates[key] = volumeTemplate } return volumeTemplates } @@ -122,6 +131,15 @@ func volumesToVolumeTemplates(volumes map[string]*VolumeServer) map[string]*Volu // Note: Implementation is thread-safe. func (s *API) AttachVolume(req *AttachVolumeRequest, opts ...scw.RequestOption) (*AttachVolumeResponse, error) { defer lockServer(req.Zone, req.ServerID).Unlock() + // check where the volume comes from + volume, err := s.getUnknownVolume(&getUnknownVolumeRequest{ + Zone: req.Zone, + VolumeID: req.VolumeID, + }) + if err != nil { + return nil, err + } + // get server with volumes getServerResponse, err := s.GetServer(&GetServerRequest{ Zone: req.Zone, @@ -144,9 +162,13 @@ func (s *API) AttachVolume(req *AttachVolumeRequest, opts ...scw.RequestOption) if _, ok := newVolumes[key]; !ok { newVolumes[key] = &VolumeServerTemplate{ ID: &req.VolumeID, - // name is ignored on this PATCH - Name: &req.VolumeID, } + if volume.Type == VolumeVolumeTypeSbsVolume { + newVolumes[key].VolumeType = VolumeVolumeTypeSbsVolume + } else { + newVolumes[key].Name = &req.VolumeID + } + found = true break } @@ -173,6 +195,10 @@ func (s *API) AttachVolume(req *AttachVolumeRequest, opts ...scw.RequestOption) type DetachVolumeRequest struct { Zone scw.Zone `json:"-"` VolumeID string `json:"-"` + // IsBlockVolume should be set to true if volume is from block API, + // can be set to false if volume is from instance API, + // if left nil both API will be tried + IsBlockVolume *bool `json:"-"` } // DetachVolumeResponse contains the updated server after detaching a volume @@ -184,27 +210,23 @@ type DetachVolumeResponse struct { // // Note: Implementation is thread-safe. 
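As a usage note for the reworked detach helper in this hunk, a hedged sketch of detaching a volume known to come from the Block Storage (SBS) API via the new IsBlockVolume hint; the zone is a placeholder, and per the field's doc comment a nil hint makes the SDK try both the Instance and Block APIs:

import (
	instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// detachKnownBlockVolume detaches a volume while hinting that it was created
// through the Block Storage API, so the SDK does not need to guess its origin.
func detachKnownBlockVolume(api *instance.API, volumeID string) (*instance.DetachVolumeResponse, error) {
	return api.DetachVolume(&instance.DetachVolumeRequest{
		Zone:          scw.ZoneFrPar1, // placeholder zone
		VolumeID:      volumeID,
		IsBlockVolume: scw.BoolPtr(true), // nil lets the SDK try both APIs, per the field's doc comment
	})
}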
func (s *API) DetachVolume(req *DetachVolumeRequest, opts ...scw.RequestOption) (*DetachVolumeResponse, error) { - // get volume - getVolumeResponse, err := s.GetVolume(&GetVolumeRequest{ + volume, err := s.getUnknownVolume(&getUnknownVolumeRequest{ Zone: req.Zone, VolumeID: req.VolumeID, }) if err != nil { return nil, err } - if getVolumeResponse.Volume == nil { - return nil, errors.New("expected volume to have value in response") - } - if getVolumeResponse.Volume.Server == nil { + + if volume.ServerID == nil { return nil, errors.New("volume should be attached to a server") } - serverID := getVolumeResponse.Volume.Server.ID - defer lockServer(req.Zone, serverID).Unlock() + defer lockServer(req.Zone, *volume.ServerID).Unlock() // get server with volumes getServerResponse, err := s.GetServer(&GetServerRequest{ Zone: req.Zone, - ServerID: serverID, + ServerID: *volume.ServerID, }) if err != nil { return nil, err @@ -222,7 +244,7 @@ func (s *API) DetachVolume(req *DetachVolumeRequest, opts ...scw.RequestOption) // update server updateServerResponse, err := s.updateServer(&UpdateServerRequest{ Zone: req.Zone, - ServerID: serverID, + ServerID: *volume.ServerID, Volumes: &newVolumes, }) if err != nil { @@ -286,32 +308,6 @@ func (r *ListImagesResponse) UnsafeSetTotalCount(totalCount int) { r.TotalCount = uint32(totalCount) } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListServersTypesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListServersTypesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListServersTypesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - if r.Servers == nil { - r.Servers = make(map[string]*ServerType, len(results.Servers)) - } - - for name, serverType := range results.Servers { - r.Servers[name] = serverType - } - - r.TotalCount += uint32(len(results.Servers)) - return uint32(len(results.Servers)), nil -} - func (v *NullableStringValue) UnmarshalJSON(b []byte) error { if string(b) == "null" { v.Null = true @@ -436,29 +432,3 @@ func (s *API) WaitForMACAddress(req *WaitForMACAddressRequest, opts ...scw.Reque func (r *GetServerTypesAvailabilityResponse) UnsafeSetTotalCount(totalCount int) { r.TotalCount = uint32(totalCount) } - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *GetServerTypesAvailabilityResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *GetServerTypesAvailabilityResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*GetServerTypesAvailabilityResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - if r.Servers == nil { - r.Servers = make(map[string]*GetServerTypesAvailabilityResponseAvailability, len(results.Servers)) - } - - for name, serverTypeAvailability := range results.Servers { - r.Servers[name] = serverTypeAvailability - } - - r.TotalCount += uint32(len(results.Servers)) - return uint32(len(results.Servers)), nil -} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/security_group_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/security_group_utils.go deleted file mode 100644 index 2a79ea5c08..0000000000 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/security_group_utils.go +++ /dev/null @@ -1,222 +0,0 @@ 
-package instance - -import ( - "fmt" - - "github.com/scaleway/scaleway-sdk-go/internal/errors" - "github.com/scaleway/scaleway-sdk-go/scw" -) - -// UpdateSecurityGroupRequest contains the parameters to update a security group -type UpdateSecurityGroupRequest struct { - Zone scw.Zone `json:"-"` - SecurityGroupID string `json:"-"` - - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - InboundDefaultPolicy *SecurityGroupPolicy `json:"inbound_default_policy,omitempty"` - OutboundDefaultPolicy *SecurityGroupPolicy `json:"outbound_default_policy,omitempty"` - Stateful *bool `json:"stateful,omitempty"` - OrganizationDefault *bool `json:"organization_default,omitempty"` - ProjectDefault *bool `json:"project_default,omitempty"` - EnableDefaultSecurity *bool `json:"enable_default_security,omitempty"` - Tags *[]string `json:"tags,omitempty"` -} - -type UpdateSecurityGroupResponse struct { - SecurityGroup *SecurityGroup -} - -// UpdateSecurityGroup updates a security group. -func (s *API) UpdateSecurityGroup(req *UpdateSecurityGroupRequest, opts ...scw.RequestOption) (*UpdateSecurityGroupResponse, error) { - var err error - - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone - } - - if fmt.Sprint(req.Zone) == "" { - return nil, errors.New("field Zone cannot be empty in request") - } - - if fmt.Sprint(req.SecurityGroupID) == "" { - return nil, errors.New("field SecurityGroupID cannot be empty in request") - } - - getSGResponse, err := s.GetSecurityGroup(&GetSecurityGroupRequest{ - Zone: req.Zone, - SecurityGroupID: req.SecurityGroupID, - }, opts...) - if err != nil { - return nil, err - } - - setRequest := &setSecurityGroupRequest{ - ID: getSGResponse.SecurityGroup.ID, - Name: getSGResponse.SecurityGroup.Name, - Description: getSGResponse.SecurityGroup.Description, - Organization: getSGResponse.SecurityGroup.Organization, - Project: getSGResponse.SecurityGroup.Project, - OrganizationDefault: getSGResponse.SecurityGroup.OrganizationDefault, - ProjectDefault: getSGResponse.SecurityGroup.ProjectDefault, - OutboundDefaultPolicy: getSGResponse.SecurityGroup.OutboundDefaultPolicy, - InboundDefaultPolicy: getSGResponse.SecurityGroup.InboundDefaultPolicy, - Stateful: getSGResponse.SecurityGroup.Stateful, - Zone: req.Zone, - EnableDefaultSecurity: getSGResponse.SecurityGroup.EnableDefaultSecurity, - CreationDate: getSGResponse.SecurityGroup.CreationDate, - ModificationDate: getSGResponse.SecurityGroup.ModificationDate, - Servers: getSGResponse.SecurityGroup.Servers, - } - - // Override the values that need to be updated - if req.Name != nil { - setRequest.Name = *req.Name - } - if req.Description != nil { - setRequest.Description = *req.Description - } - if req.InboundDefaultPolicy != nil { - setRequest.InboundDefaultPolicy = *req.InboundDefaultPolicy - } - if req.OutboundDefaultPolicy != nil { - setRequest.OutboundDefaultPolicy = *req.OutboundDefaultPolicy - } - if req.Stateful != nil { - setRequest.Stateful = *req.Stateful - } - if req.OrganizationDefault != nil { - setRequest.OrganizationDefault = req.OrganizationDefault - } - if req.ProjectDefault != nil { - setRequest.ProjectDefault = *req.ProjectDefault - } - if req.EnableDefaultSecurity != nil { - setRequest.EnableDefaultSecurity = *req.EnableDefaultSecurity - } - if req.Tags != nil { - setRequest.Tags = req.Tags - } - - setRes, err := s.setSecurityGroup(setRequest, opts...) 
- if err != nil { - return nil, err - } - - return &UpdateSecurityGroupResponse{ - SecurityGroup: setRes.SecurityGroup, - }, nil -} - -// UpdateSecurityGroupRuleRequest contains the parameters to update a security group rule -type UpdateSecurityGroupRuleRequest struct { - Zone scw.Zone `json:"-"` - SecurityGroupID string `json:"-"` - SecurityGroupRuleID string `json:"-"` - - Protocol *SecurityGroupRuleProtocol `json:"protocol"` - Direction *SecurityGroupRuleDirection `json:"direction"` - Action *SecurityGroupRuleAction `json:"action"` - IPRange *scw.IPNet `json:"ip_range"` - Position *uint32 `json:"position"` - - // If set to 0, DestPortFrom will be removed. - // See SecurityGroupRule.DestPortFrom for more information - DestPortFrom *uint32 `json:"dest_port_from"` - - // If set to 0, DestPortTo will be removed. - // See SecurityGroupRule.DestPortTo for more information - DestPortTo *uint32 `json:"dest_port_to"` -} - -type UpdateSecurityGroupRuleResponse struct { - Rule *SecurityGroupRule `json:"security_rule"` -} - -// UpdateSecurityGroupRule updates a security group. -func (s *API) UpdateSecurityGroupRule(req *UpdateSecurityGroupRuleRequest, opts ...scw.RequestOption) (*UpdateSecurityGroupRuleResponse, error) { - var err error - - if fmt.Sprint(req.Zone) == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone - } - - if fmt.Sprint(req.Zone) == "" { - return nil, errors.New("field Zone cannot be empty in request") - } - - res, err := s.GetSecurityGroupRule(&GetSecurityGroupRuleRequest{ - SecurityGroupRuleID: req.SecurityGroupRuleID, - SecurityGroupID: req.SecurityGroupID, - Zone: req.Zone, - }) - if err != nil { - return nil, err - } - - setRequest := &setSecurityGroupRuleRequest{ - Zone: req.Zone, - SecurityGroupID: req.SecurityGroupID, - SecurityGroupRuleID: req.SecurityGroupRuleID, - ID: req.SecurityGroupRuleID, - Direction: res.Rule.Direction, - Protocol: res.Rule.Protocol, - DestPortFrom: res.Rule.DestPortFrom, - DestPortTo: res.Rule.DestPortTo, - IPRange: res.Rule.IPRange, - Action: res.Rule.Action, - Position: res.Rule.Position, - Editable: res.Rule.Editable, - } - - // Override the values that need to be updated - if req.Action != nil { - setRequest.Action = *req.Action - } - if req.IPRange != nil { - setRequest.IPRange = *req.IPRange - } - if req.DestPortTo != nil { - if *req.DestPortTo > 0 { - setRequest.DestPortTo = req.DestPortTo - } else { - setRequest.DestPortTo = nil - } - } - if req.DestPortFrom != nil { - if *req.DestPortFrom > 0 { - setRequest.DestPortFrom = req.DestPortFrom - } else { - setRequest.DestPortFrom = nil - } - } - if req.DestPortFrom != nil && req.DestPortTo != nil && *req.DestPortFrom == *req.DestPortTo { - setRequest.DestPortTo = nil - } - if req.Protocol != nil { - setRequest.Protocol = *req.Protocol - } - if req.Direction != nil { - setRequest.Direction = *req.Direction - } - if req.Position != nil { - setRequest.Position = *req.Position - } - - // When we use ICMP protocol portFrom and portTo should be set to nil - if req.Protocol != nil && *req.Protocol == SecurityGroupRuleProtocolICMP { - setRequest.DestPortFrom = nil - setRequest.DestPortTo = nil - } - - resp, err := s.setSecurityGroupRule(setRequest) - if err != nil { - return nil, err - } - - return &UpdateSecurityGroupRuleResponse{ - Rule: resp.Rule, - }, nil -} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/snapshot_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/snapshot_utils.go index ec25e8af1c..6efc419d3b 100644 --- 
a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/snapshot_utils.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/snapshot_utils.go @@ -1,7 +1,6 @@ package instance import ( - "fmt" "time" "github.com/scaleway/scaleway-sdk-go/internal/async" @@ -55,68 +54,3 @@ func (s *API) WaitForSnapshot(req *WaitForSnapshotRequest, opts ...scw.RequestOp } return snapshot.(*Snapshot), nil } - -type UpdateSnapshotRequest struct { - Zone scw.Zone - SnapshotID string - Name *string `json:"name,omitempty"` - Tags *[]string `json:"tags,omitempty"` -} - -type UpdateSnapshotResponse struct { - Snapshot *Snapshot -} - -func (s *API) UpdateSnapshot(req *UpdateSnapshotRequest, opts ...scw.RequestOption) (*UpdateSnapshotResponse, error) { - var err error - - if req.Zone == "" { - defaultZone, _ := s.client.GetDefaultZone() - req.Zone = defaultZone - } - - if fmt.Sprint(req.Zone) == "" { - return nil, errors.New("field Zone cannot be empty in request") - } - - if fmt.Sprint(req.SnapshotID) == "" { - return nil, errors.New("field SnapshotID cannot be empty in request") - } - - getSnapshotResponse, err := s.GetSnapshot(&GetSnapshotRequest{ - Zone: req.Zone, - SnapshotID: req.SnapshotID, - }, opts...) - if err != nil { - return nil, err - } - - setRequest := &setSnapshotRequest{ - SnapshotID: getSnapshotResponse.Snapshot.ID, - Zone: getSnapshotResponse.Snapshot.Zone, - ID: getSnapshotResponse.Snapshot.ID, - Name: getSnapshotResponse.Snapshot.Name, - CreationDate: getSnapshotResponse.Snapshot.CreationDate, - ModificationDate: getSnapshotResponse.Snapshot.ModificationDate, - Organization: getSnapshotResponse.Snapshot.Organization, - Project: getSnapshotResponse.Snapshot.Project, - } - - // Override the values that need to be updated - if req.Name != nil { - setRequest.Name = *req.Name - } - - if req.Tags != nil { - setRequest.Tags = req.Tags - } - - setRes, err := s.setSnapshot(setRequest, opts...) 
- if err != nil { - return nil, err - } - - return &UpdateSnapshotResponse{ - Snapshot: setRes.Snapshot, - }, nil -} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/volume_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/volume_utils.go index 710c3b5299..30b98c6e9c 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/volume_utils.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/instance/v1/volume_utils.go @@ -1,8 +1,10 @@ package instance import ( + goerrors "errors" "time" + block "github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1" "github.com/scaleway/scaleway-sdk-go/internal/async" "github.com/scaleway/scaleway-sdk-go/internal/errors" "github.com/scaleway/scaleway-sdk-go/scw" @@ -54,3 +56,58 @@ func (s *API) WaitForVolume(req *WaitForVolumeRequest, opts ...scw.RequestOption } return volume.(*Volume), nil } + +type unknownVolume struct { + ID string + ServerID *string + Type VolumeVolumeType +} + +type getUnknownVolumeRequest struct { + Zone scw.Zone + VolumeID string + IsBlockVolume *bool +} + +// getUnknownVolume is used to get a volume that can be either from instance or block API +func (s *API) getUnknownVolume(req *getUnknownVolumeRequest, opts ...scw.RequestOption) (*unknownVolume, error) { + volume := &unknownVolume{ + ID: req.VolumeID, + } + + // Try instance API + if req.IsBlockVolume == nil || *req.IsBlockVolume == false { + getVolumeResponse, err := s.GetVolume(&GetVolumeRequest{ + Zone: req.Zone, + VolumeID: req.VolumeID, + }) + notFoundErr := &scw.ResourceNotFoundError{} + if err != nil && !goerrors.As(err, ¬FoundErr) { + return nil, err + } + + if getVolumeResponse != nil { + if getVolumeResponse.Volume != nil && getVolumeResponse.Volume.Server != nil { + volume.ServerID = &getVolumeResponse.Volume.Server.ID + } + volume.Type = getVolumeResponse.Volume.VolumeType + } + } + if volume.Type == "" && (req.IsBlockVolume == nil || *req.IsBlockVolume == true) { + getVolumeResponse, err := block.NewAPI(s.client).GetVolume(&block.GetVolumeRequest{ + Zone: req.Zone, + VolumeID: req.VolumeID, + }) + if err != nil { + return nil, err + } + for _, reference := range getVolumeResponse.References { + if reference.ProductResourceType == "instance_server" { + volume.ServerID = &reference.ProductResourceID + } + } + volume.Type = VolumeVolumeTypeSbsVolume + } + + return volume, nil +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/ipam/v1alpha1/ipam_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/ipam/v1alpha1/ipam_sdk.go index a5876af8ce..524d81290e 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/ipam/v1alpha1/ipam_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/ipam/v1alpha1/ipam_sdk.go @@ -39,18 +39,6 @@ var ( _ = namegenerator.GetRandomName ) -// API: iPAM API. -type API struct { - client *scw.Client -} - -// NewAPI returns a API object from a Scaleway client. 
-func NewAPI(client *scw.Client) *API { - return &API{ - client: client, - } -} - type ListIPsRequestOrderBy string const ( @@ -97,6 +85,7 @@ const ( ResourceTypeVpcGateway = ResourceType("vpc_gateway") ResourceTypeVpcGatewayNetwork = ResourceType("vpc_gateway_network") ResourceTypeK8sNode = ResourceType("k8s_node") + ResourceTypeK8sCluster = ResourceType("k8s_cluster") ResourceTypeRdbInstance = ResourceType("rdb_instance") ResourceTypeRedisCluster = ResourceType("redis_cluster") ResourceTypeBaremetalServer = ResourceType("baremetal_server") @@ -126,6 +115,26 @@ func (enum *ResourceType) UnmarshalJSON(data []byte) error { return nil } +// Resource: resource. +type Resource struct { + // Type: default value: unknown_type + Type ResourceType `json:"type"` + + ID string `json:"id"` + + MacAddress *string `json:"mac_address"` + + Name *string `json:"name"` +} + +// Reverse: reverse. +type Reverse struct { + Hostname string `json:"hostname"` + + Address *scw.IPNet `json:"address"` +} + +// IP: ip. type IP struct { ID string `json:"id"` @@ -139,69 +148,32 @@ type IP struct { UpdatedAt *time.Time `json:"updated_at"` - // Precisely one of Regional, SubnetID, Zonal, ZonalNat must be set. + // Precisely one of Regional, Zonal, ZonalNat, SubnetID must be set. Regional *bool `json:"regional,omitempty"` - // Precisely one of Regional, SubnetID, Zonal, ZonalNat must be set. + // Precisely one of Regional, Zonal, ZonalNat, SubnetID must be set. Zonal *string `json:"zonal,omitempty"` - // Precisely one of Regional, SubnetID, Zonal, ZonalNat must be set. + // Precisely one of Regional, Zonal, ZonalNat, SubnetID must be set. ZonalNat *string `json:"zonal_nat,omitempty"` - // Precisely one of Regional, SubnetID, Zonal, ZonalNat must be set. + // Precisely one of Regional, Zonal, ZonalNat, SubnetID must be set. SubnetID *string `json:"subnet_id,omitempty"` Resource *Resource `json:"resource"` Tags []string `json:"tags"` + Reverses []*Reverse `json:"reverses"` + + // Region: region to target. If none is passed will use default region from the config. Region scw.Region `json:"region"` + // Zone: zone to target. If none is passed will use default zone from the config. Zone *scw.Zone `json:"zone"` } -type ListIPsResponse struct { - TotalCount uint64 `json:"total_count"` - - IPs []*IP `json:"ips"` -} - -type Resource struct { - // Type: default value: unknown_type - Type ResourceType `json:"type"` - - ID string `json:"id"` - - MacAddress *string `json:"mac_address"` - - Name *string `json:"name"` -} - -type Source struct { - - // Precisely one of PrivateNetworkID, Regional, SubnetID, Zonal, ZonalNat must be set. - Zonal *string `json:"zonal,omitempty"` - - // Precisely one of PrivateNetworkID, Regional, SubnetID, Zonal, ZonalNat must be set. - ZonalNat *string `json:"zonal_nat,omitempty"` - - // Precisely one of PrivateNetworkID, Regional, SubnetID, Zonal, ZonalNat must be set. - Regional *bool `json:"regional,omitempty"` - - // Precisely one of PrivateNetworkID, Regional, SubnetID, Zonal, ZonalNat must be set. - PrivateNetworkID *string `json:"private_network_id,omitempty"` - - // Precisely one of PrivateNetworkID, Regional, SubnetID, Zonal, ZonalNat must be set. - SubnetID *string `json:"subnet_id,omitempty"` -} - -// Service API - -// Regions list localities the api is available in -func (s *API) Regions() []scw.Region { - return []scw.Region{scw.RegionFrPar, scw.RegionNlAms, scw.RegionPlWaw} -} - +// ListIPsRequest: list i ps request. type ListIPsRequest struct { // Region: region to target. 
If none is passed will use default region from the config. Region scw.Region `json:"-"` @@ -209,6 +181,7 @@ type ListIPsRequest struct { Page *int32 `json:"-"` PageSize *uint32 `json:"-"` + // OrderBy: default value: created_at_desc OrderBy ListIPsRequestOrderBy `json:"-"` @@ -216,19 +189,25 @@ type ListIPsRequest struct { OrganizationID *string `json:"-"` - Zonal *string `json:"-"` + // Precisely one of Zonal, ZonalNat, Regional, PrivateNetworkID, SubnetID must be set. + Zonal *string `json:"zonal,omitempty"` - ZonalNat *string `json:"-"` + // Precisely one of Zonal, ZonalNat, Regional, PrivateNetworkID, SubnetID must be set. + ZonalNat *string `json:"zonal_nat,omitempty"` - Regional *bool `json:"-"` + // Precisely one of Zonal, ZonalNat, Regional, PrivateNetworkID, SubnetID must be set. + Regional *bool `json:"regional,omitempty"` - PrivateNetworkID *string `json:"-"` + // Precisely one of Zonal, ZonalNat, Regional, PrivateNetworkID, SubnetID must be set. + PrivateNetworkID *string `json:"private_network_id,omitempty"` - SubnetID *string `json:"-"` + // Precisely one of Zonal, ZonalNat, Regional, PrivateNetworkID, SubnetID must be set. + SubnetID *string `json:"subnet_id,omitempty"` Attached *bool `json:"-"` ResourceID *string `json:"-"` + // ResourceType: default value: unknown_type ResourceType ResourceType `json:"-"` @@ -243,58 +222,11 @@ type ListIPsRequest struct { ResourceIDs []string `json:"-"` } -// ListIPs: find IP addresses. -func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsResponse, error) { - var err error +// ListIPsResponse: list i ps response. +type ListIPsResponse struct { + TotalCount uint64 `json:"total_count"` - if req.Region == "" { - defaultRegion, _ := s.client.GetDefaultRegion() - req.Region = defaultRegion - } - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "project_id", req.ProjectID) - parameter.AddToQuery(query, "organization_id", req.OrganizationID) - parameter.AddToQuery(query, "zonal", req.Zonal) - parameter.AddToQuery(query, "zonal_nat", req.ZonalNat) - parameter.AddToQuery(query, "regional", req.Regional) - parameter.AddToQuery(query, "private_network_id", req.PrivateNetworkID) - parameter.AddToQuery(query, "subnet_id", req.SubnetID) - parameter.AddToQuery(query, "attached", req.Attached) - parameter.AddToQuery(query, "resource_id", req.ResourceID) - parameter.AddToQuery(query, "resource_type", req.ResourceType) - parameter.AddToQuery(query, "mac_address", req.MacAddress) - parameter.AddToQuery(query, "tags", req.Tags) - parameter.AddToQuery(query, "is_ipv6", req.IsIPv6) - parameter.AddToQuery(query, "resource_name", req.ResourceName) - parameter.AddToQuery(query, "resource_ids", req.ResourceIDs) - - if fmt.Sprint(req.Region) == "" { - return nil, errors.New("field Region cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/ipam/v1alpha1/regions/" + fmt.Sprint(req.Region) + "/ips", - Query: query, - Headers: http.Header{}, - } - - var resp ListIPsResponse - - err = s.client.Do(scwReq, &resp, opts...) 
- if err != nil { - return nil, err - } - return &resp, nil + IPs []*IP `json:"ips"` } // UnsafeGetTotalCount should not be used @@ -315,3 +247,71 @@ func (r *ListIPsResponse) UnsafeAppend(res interface{}) (uint64, error) { r.TotalCount += uint64(len(results.IPs)) return uint64(len(results.IPs)), nil } + +// IPAM API. +type API struct { + client *scw.Client +} + +// NewAPI returns a API object from a Scaleway client. +func NewAPI(client *scw.Client) *API { + return &API{ + client: client, + } +} +func (s *API) Regions() []scw.Region { + return []scw.Region{scw.RegionFrPar, scw.RegionNlAms, scw.RegionPlWaw} +} + +// ListIPs: Find IP addresses. +func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsResponse, error) { + var err error + + if req.Region == "" { + defaultRegion, _ := s.client.GetDefaultRegion() + req.Region = defaultRegion + } + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "project_id", req.ProjectID) + parameter.AddToQuery(query, "organization_id", req.OrganizationID) + parameter.AddToQuery(query, "attached", req.Attached) + parameter.AddToQuery(query, "resource_id", req.ResourceID) + parameter.AddToQuery(query, "resource_type", req.ResourceType) + parameter.AddToQuery(query, "mac_address", req.MacAddress) + parameter.AddToQuery(query, "tags", req.Tags) + parameter.AddToQuery(query, "is_ipv6", req.IsIPv6) + parameter.AddToQuery(query, "resource_name", req.ResourceName) + parameter.AddToQuery(query, "resource_ids", req.ResourceIDs) + parameter.AddToQuery(query, "zonal", req.Zonal) + parameter.AddToQuery(query, "zonal_nat", req.ZonalNat) + parameter.AddToQuery(query, "regional", req.Regional) + parameter.AddToQuery(query, "private_network_id", req.PrivateNetworkID) + parameter.AddToQuery(query, "subnet_id", req.SubnetID) + + if fmt.Sprint(req.Region) == "" { + return nil, errors.New("field Region cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/ipam/v1alpha1/regions/" + fmt.Sprint(req.Region) + "/ips", + Query: query, + } + + var resp ListIPsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/lb/v1/lb_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/lb/v1/lb_sdk.go index 265340397a..78816a0d01 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/lb/v1/lb_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/lb/v1/lb_sdk.go @@ -39,32 +39,6 @@ var ( _ = namegenerator.GetRandomName ) -// ZonedAPI: this API allows you to manage your Scaleway Load Balancer services. -// Load Balancer API. -type ZonedAPI struct { - client *scw.Client -} - -// NewZonedAPI returns a ZonedAPI object from a Scaleway client. -func NewZonedAPI(client *scw.Client) *ZonedAPI { - return &ZonedAPI{ - client: client, - } -} - -// API: this API allows you to manage your load balancer service. -// Load balancer API. -type API struct { - client *scw.Client -} - -// Deprecated NewAPI returns a API object from a Scaleway client. 
-func NewAPI(client *scw.Client) *API { - return &API{ - client: client, - } -} - type ACLActionRedirectRedirectType string const ( @@ -766,7 +740,6 @@ func (enum *Protocol) UnmarshalJSON(data []byte) error { return nil } -// ProxyProtocol: pROXY protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. PROXY protocol must be supported by the backend servers' software. For more information on the different protocols available, see the [dedicated documentation](https://www.scaleway.com/en/docs/network/load-balancer/reference-content/configuring-load-balancer/#choosing-a-proxy-protocol). type ProxyProtocol string const ( @@ -864,35 +837,288 @@ func (enum *StickySessionsType) UnmarshalJSON(data []byte) error { return nil } -// ACL: acl. -type ACL struct { - // ID: ACL ID. - ID string `json:"id"` - // Name: ACL name. - Name string `json:"name"` - // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. - Match *ACLMatch `json:"match"` - // Action: action to take when incoming traffic matches an ACL filter. - Action *ACLAction `json:"action"` - // Frontend: ACL is attached to this frontend object. - Frontend *Frontend `json:"frontend"` - // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). - Index int32 `json:"index"` - // CreatedAt: date on which the ACL was created. - CreatedAt *time.Time `json:"created_at"` - // UpdatedAt: date on which the ACL was last updated. - UpdatedAt *time.Time `json:"updated_at"` - // Description: ACL description. - Description string `json:"description"` +// SubscriberEmailConfig: subscriber email config. +type SubscriberEmailConfig struct { + // Email: email address to send alerts to. + Email string `json:"email"` } -// ACLAction: acl action. -type ACLAction struct { - // Type: action to take when incoming traffic matches an ACL filter. - // Default value: allow - Type ACLActionType `json:"type"` - // Redirect: redirection parameters when using an ACL with a `redirect` action. - Redirect *ACLActionRedirect `json:"redirect"` +// SubscriberWebhookConfig: Webhook alert of subscriber. +type SubscriberWebhookConfig struct { + // URI: URI to receive POST requests. + URI string `json:"uri"` +} + +// HealthCheckHTTPConfig: health check http config. +type HealthCheckHTTPConfig struct { + // URI: the HTTP URI to use when performing a health check on backend servers. + URI string `json:"uri"` + + // Method: the HTTP method used when performing a health check on backend servers. + Method string `json:"method"` + + // Code: the HTTP response code that should be returned for a health check to be considered successful. + Code *int32 `json:"code"` + + // HostHeader: the HTTP host header used when performing a health check on backend servers. + HostHeader string `json:"host_header"` +} + +// HealthCheckHTTPSConfig: health check https config. +type HealthCheckHTTPSConfig struct { + // URI: the HTTP URI to use when performing a health check on backend servers. + URI string `json:"uri"` + + // Method: the HTTP method used when performing a health check on backend servers. + Method string `json:"method"` + + // Code: the HTTP response code that should be returned for a health check to be considered successful. + Code *int32 `json:"code"` + + // HostHeader: the HTTP host header used when performing a health check on backend servers. 
+ HostHeader string `json:"host_header"` + + // Sni: the SNI value used when performing a health check on backend servers over SSL. + Sni string `json:"sni"` +} + +// HealthCheckLdapConfig: health check ldap config. +type HealthCheckLdapConfig struct { +} + +// HealthCheckMysqlConfig: health check mysql config. +type HealthCheckMysqlConfig struct { + // User: mySQL user to use for the health check. + User string `json:"user"` +} + +// HealthCheckPgsqlConfig: health check pgsql config. +type HealthCheckPgsqlConfig struct { + // User: postgreSQL user to use for the health check. + User string `json:"user"` +} + +// HealthCheckRedisConfig: health check redis config. +type HealthCheckRedisConfig struct { +} + +// HealthCheckTCPConfig: health check tcp config. +type HealthCheckTCPConfig struct { +} + +// IP: ip. +type IP struct { + // ID: IP address ID. + ID string `json:"id"` + + // IPAddress: IP address. + IPAddress string `json:"ip_address"` + + // OrganizationID: organization ID of the Scaleway Organization the IP address is in. + OrganizationID string `json:"organization_id"` + + // ProjectID: project ID of the Scaleway Project the IP address is in. + ProjectID string `json:"project_id"` + + // LBID: load Balancer ID. + LBID *string `json:"lb_id"` + + // Reverse: reverse DNS (domain name) of the IP address. + Reverse string `json:"reverse"` + + // Deprecated: Region: the region the IP address is in. + Region *scw.Region `json:"region,omitempty"` + + // Zone: the zone the IP address is in. + Zone scw.Zone `json:"zone"` +} + +// Instance: instance. +type Instance struct { + // ID: underlying Instance ID. + ID string `json:"id"` + + // Status: instance status. + // Default value: unknown + Status InstanceStatus `json:"status"` + + // IPAddress: instance IP address. + IPAddress string `json:"ip_address"` + + // CreatedAt: date on which the Instance was created. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: date on which the Instance was last updated. + UpdatedAt *time.Time `json:"updated_at"` + + // Deprecated: Region: the region the Instance is in. + Region *scw.Region `json:"region,omitempty"` + + // Zone: the zone the Instance is in. + Zone scw.Zone `json:"zone"` +} + +// Subscriber: Subscriber. +type Subscriber struct { + // ID: subscriber ID. + ID string `json:"id"` + + // Name: subscriber name. + Name string `json:"name"` + + // EmailConfig: email address of subscriber. + // Precisely one of EmailConfig, WebhookConfig must be set. + EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` + + // WebhookConfig: webhook URI of subscriber. + // Precisely one of EmailConfig, WebhookConfig must be set. + WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` +} + +// HealthCheck: health check. +type HealthCheck struct { + // Port: port to use for the backend server health check. + Port int32 `json:"port"` + + // CheckDelay: time to wait between two consecutive health checks. + CheckDelay *time.Duration `json:"check_delay"` + + // CheckTimeout: maximum time a backend server has to reply to the health check. + CheckTimeout *time.Duration `json:"check_timeout"` + + // CheckMaxRetries: number of consecutive unsuccessful health checks after which the server will be considered dead. + CheckMaxRetries int32 `json:"check_max_retries"` + + // TCPConfig: object to configure a basic TCP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. 
+ TCPConfig *HealthCheckTCPConfig `json:"tcp_config,omitempty"` + + // MysqlConfig: object to configure a MySQL health check. The check requires MySQL >=3.22, for older versions, use a TCP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + MysqlConfig *HealthCheckMysqlConfig `json:"mysql_config,omitempty"` + + // PgsqlConfig: object to configure a PostgreSQL health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + PgsqlConfig *HealthCheckPgsqlConfig `json:"pgsql_config,omitempty"` + + // LdapConfig: object to configure an LDAP health check. The response is analyzed to find the LDAPv3 response message. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + LdapConfig *HealthCheckLdapConfig `json:"ldap_config,omitempty"` + + // RedisConfig: object to configure a Redis health check. The response is analyzed to find the +PONG response message. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + RedisConfig *HealthCheckRedisConfig `json:"redis_config,omitempty"` + + // HTTPConfig: object to configure an HTTP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + HTTPConfig *HealthCheckHTTPConfig `json:"http_config,omitempty"` + + // HTTPSConfig: object to configure an HTTPS health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + HTTPSConfig *HealthCheckHTTPSConfig `json:"https_config,omitempty"` + + // CheckSendProxy: defines whether proxy protocol should be activated for the health check. + CheckSendProxy bool `json:"check_send_proxy"` + + // TransientCheckDelay: time to wait between two consecutive health checks when a backend server is in a transient state (going UP or DOWN). + TransientCheckDelay *scw.Duration `json:"transient_check_delay"` +} + +func (m *HealthCheck) UnmarshalJSON(b []byte) error { + type tmpType HealthCheck + tmp := struct { + tmpType + TmpCheckDelay *marshaler.Duration `json:"check_delay"` + TmpCheckTimeout *marshaler.Duration `json:"check_timeout"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = HealthCheck(tmp.tmpType) + m.CheckDelay = tmp.TmpCheckDelay.Standard() + m.CheckTimeout = tmp.TmpCheckTimeout.Standard() + return nil +} + +func (m HealthCheck) MarshalJSON() ([]byte, error) { + type tmpType HealthCheck + tmp := struct { + tmpType + TmpCheckDelay *marshaler.Duration `json:"check_delay"` + TmpCheckTimeout *marshaler.Duration `json:"check_timeout"` + }{ + tmpType: tmpType(m), + TmpCheckDelay: marshaler.NewDuration(m.CheckDelay), + TmpCheckTimeout: marshaler.NewDuration(m.CheckTimeout), + } + return json.Marshal(tmp) +} + +// LB: lb. +type LB struct { + // ID: underlying Instance ID. + ID string `json:"id"` + + // Name: load Balancer name. + Name string `json:"name"` + + // Description: load Balancer description. + Description string `json:"description"` + + // Status: load Balancer status. + // Default value: unknown + Status LBStatus `json:"status"` + + // Instances: list of underlying Instances. + Instances []*Instance `json:"instances"` + + // OrganizationID: scaleway Organization ID. 
+ OrganizationID string `json:"organization_id"` + + // ProjectID: scaleway Project ID. + ProjectID string `json:"project_id"` + + // IP: list of IP addresses attached to the Load Balancer. + IP []*IP `json:"ip"` + + // Tags: load Balancer tags. + Tags []string `json:"tags"` + + // FrontendCount: number of frontends the Load Balancer has. + FrontendCount int32 `json:"frontend_count"` + + // BackendCount: number of backends the Load Balancer has. + BackendCount int32 `json:"backend_count"` + + // Type: load Balancer offer type. + Type string `json:"type"` + + // Subscriber: subscriber information. + Subscriber *Subscriber `json:"subscriber"` + + // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on client side. + // Default value: ssl_compatibility_level_unknown + SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` + + // CreatedAt: date on which the Load Balancer was created. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: date on which the Load Balancer was last updated. + UpdatedAt *time.Time `json:"updated_at"` + + // PrivateNetworkCount: number of Private Networks attached to the Load Balancer. + PrivateNetworkCount int32 `json:"private_network_count"` + + // RouteCount: number of routes configured on the Load Balancer. + RouteCount int32 `json:"route_count"` + + // Deprecated: Region: the region the Load Balancer is in. + Region *scw.Region `json:"region,omitempty"` + + // Zone: the zone the Load Balancer is in. + Zone scw.Zone `json:"zone"` } // ACLActionRedirect: acl action redirect. @@ -900,99 +1126,93 @@ type ACLActionRedirect struct { // Type: redirect type. // Default value: location Type ACLActionRedirectRedirectType `json:"type"` + // Target: redirect target. For a location redirect, you can use a URL e.g. `https://scaleway.com`. Using a scheme name (e.g. `https`, `http`, `ftp`, `git`) will replace the request's original scheme. This can be useful to implement HTTP to HTTPS redirects. Valid placeholders that can be used in a `location` redirect to preserve parts of the original request in the redirection URL are \{\{host\}\}, \{\{query\}\}, \{\{path\}\} and \{\{scheme\}\}. Target string `json:"target"` + // Code: HTTP redirect code to use. Valid values are 301, 302, 303, 307 and 308. Default value is 302. Code *int32 `json:"code"` } -// ACLMatch: acl match. -type ACLMatch struct { - // IPSubnet: list of IPs or CIDR v4/v6 addresses to filter for from the client side. - IPSubnet []*string `json:"ip_subnet"` - // HTTPFilter: type of HTTP filter to match. Extracts the request's URL path, which starts at the first slash and ends before the question mark (without the host part). Defines where to filter for the http_filter_value. Only supported for HTTP backends. - // Default value: acl_http_filter_none - HTTPFilter ACLHTTPFilter `json:"http_filter"` - // HTTPFilterValue: list of values to filter for. - HTTPFilterValue []*string `json:"http_filter_value"` - // HTTPFilterOption: name of the HTTP header to filter on if `http_header_match` was selected in `http_filter`. - HTTPFilterOption *string `json:"http_filter_option"` - // Invert: defines whether to invert the match condition. If set to `true`, the ACL carries out its action when the condition DOES NOT match. - Invert bool `json:"invert"` -} - -// ACLSpec: acl spec. -type ACLSpec struct { - // Name: ACL name. - Name string `json:"name"` - // Action: action to take when incoming traffic matches an ACL filter. 
- Action *ACLAction `json:"action"` - // Match: ACL match filter object. One of `ip_subnet` or `http_filter` and `http_filter_value` are required. - Match *ACLMatch `json:"match"` - // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). - Index int32 `json:"index"` - // Description: ACL description. - Description string `json:"description"` -} - // Backend: backend. type Backend struct { // ID: backend ID. ID string `json:"id"` + // Name: name of the backend. Name string `json:"name"` + // ForwardProtocol: protocol used by the backend when forwarding traffic to backend servers. // Default value: tcp ForwardProtocol Protocol `json:"forward_protocol"` + // ForwardPort: port used by the backend when forwarding traffic to backend servers. ForwardPort int32 `json:"forward_port"` + // ForwardPortAlgorithm: load balancing algorithm to use when determining which backend server to forward new traffic to. // Default value: roundrobin ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` + // StickySessions: defines whether sticky sessions (binding a particular session to a particular backend server) are activated and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie to stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. // Default value: none StickySessions StickySessionsType `json:"sticky_sessions"` + // StickySessionsCookieName: cookie name for cookie-based sticky sessions. StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` + // HealthCheck: object defining the health check to be carried out by the backend when checking the status and health of backend servers. HealthCheck *HealthCheck `json:"health_check"` + // Pool: list of IP addresses of backend servers attached to this backend. Pool []string `json:"pool"` + // LB: load Balancer the backend is attached to. LB *LB `json:"lb"` + // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` + // TimeoutServer: maximum allowed time for a backend server to process a request. - // Default value: 300000 TimeoutServer *time.Duration `json:"timeout_server"` + // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. - // Default value: 5000 TimeoutConnect *time.Duration `json:"timeout_connect"` + // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). - // Default value: 900000 TimeoutTunnel *time.Duration `json:"timeout_tunnel"` + // OnMarkedDownAction: action to take when a backend server is marked as down. // Default value: on_marked_down_action_none OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` + // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. The PROXY protocol must be supported by the backend servers' software. // Default value: proxy_protocol_unknown ProxyProtocol ProxyProtocol `json:"proxy_protocol"` + // CreatedAt: date at which the backend was created. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date at which the backend was updated. UpdatedAt *time.Time `json:"updated_at"` + // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. 
FailoverHost *string `json:"failover_host"` + // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. SslBridging *bool `json:"ssl_bridging"` + // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify"` + // RedispatchAttemptCount: whether to use another backend server on each attempt. RedispatchAttemptCount *int32 `json:"redispatch_attempt_count"` + // MaxRetries: number of retries when a backend server connection failed. MaxRetries *int32 `json:"max_retries"` + // MaxConnections: maximum number of connections allowed per backend server. MaxConnections *int32 `json:"max_connections"` + // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. TimeoutQueue *scw.Duration `json:"timeout_queue"` } @@ -1001,7 +1221,6 @@ func (m *Backend) UnmarshalJSON(b []byte) error { type tmpType Backend tmp := struct { tmpType - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` @@ -1012,7 +1231,6 @@ func (m *Backend) UnmarshalJSON(b []byte) error { } *m = Backend(tmp.tmpType) - m.TimeoutServer = tmp.TmpTimeoutServer.Standard() m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() @@ -1023,13 +1241,11 @@ func (m Backend) MarshalJSON() ([]byte, error) { type tmpType Backend tmp := struct { tmpType - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` }{ - tmpType: tmpType(m), - + tmpType: tmpType(m), TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), @@ -1037,93 +1253,111 @@ func (m Backend) MarshalJSON() ([]byte, error) { return json.Marshal(tmp) } -// BackendServerStats: backend server stats. -type BackendServerStats struct { - // InstanceID: ID of your Load Balancer's underlying Instance. - InstanceID string `json:"instance_id"` - // BackendID: backend ID. - BackendID string `json:"backend_id"` - // IP: iPv4 or IPv6 address of the backend server. - IP string `json:"ip"` - // ServerState: server operational state (stopped/starting/running/stopping). - // Default value: stopped - ServerState BackendServerStatsServerState `json:"server_state"` - // ServerStateChangedAt: time since last operational change. - ServerStateChangedAt *time.Time `json:"server_state_changed_at"` - // LastHealthCheckStatus: last health check status (unknown/neutral/failed/passed/condpass). - // Default value: unknown - LastHealthCheckStatus BackendServerStatsHealthCheckStatus `json:"last_health_check_status"` -} - // Certificate: certificate. type Certificate struct { // Type: certificate type (Let's Encrypt or custom). // Default value: letsencryt Type CertificateType `json:"type"` + // ID: certificate ID. ID string `json:"id"` + // CommonName: main domain name of certificate. CommonName string `json:"common_name"` + // SubjectAlternativeName: alternative domain names. SubjectAlternativeName []string `json:"subject_alternative_name"` + // Fingerprint: identifier (SHA-1) of the certificate. Fingerprint string `json:"fingerprint"` + // NotValidBefore: lower validity bound. 
NotValidBefore *time.Time `json:"not_valid_before"` + // NotValidAfter: upper validity bound. NotValidAfter *time.Time `json:"not_valid_after"` + // Status: certificate status. // Default value: pending Status CertificateStatus `json:"status"` + // LB: load Balancer object the certificate is attached to. LB *LB `json:"lb"` + // Name: certificate name. Name string `json:"name"` + // CreatedAt: date on which the certificate was created. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date on which the certificate was last updated. UpdatedAt *time.Time `json:"updated_at"` + // StatusDetails: additional information about the certificate status (useful in case of certificate generation failure, for example). StatusDetails *string `json:"status_details"` } -// CreateCertificateRequestCustomCertificate: create certificate request. custom certificate. -type CreateCertificateRequestCustomCertificate struct { - // CertificateChain: full PEM-formatted certificate, consisting of the entire certificate chain including public key, private key, and (optionally) Certificate Authorities. - CertificateChain string `json:"certificate_chain"` +// ACLAction: acl action. +type ACLAction struct { + // Type: action to take when incoming traffic matches an ACL filter. + // Default value: allow + Type ACLActionType `json:"type"` + + // Redirect: redirection parameters when using an ACL with a `redirect` action. + Redirect *ACLActionRedirect `json:"redirect"` } -// CreateCertificateRequestLetsencryptConfig: create certificate request. letsencrypt config. -type CreateCertificateRequestLetsencryptConfig struct { - // CommonName: main domain name of certificate (this domain must exist and resolve to your Load Balancer IP address). - CommonName string `json:"common_name"` - // SubjectAlternativeName: alternative domain names (all domain names must exist and resolve to your Load Balancer IP address). - SubjectAlternativeName []string `json:"subject_alternative_name"` +// ACLMatch: acl match. +type ACLMatch struct { + // IPSubnet: list of IPs or CIDR v4/v6 addresses to filter for from the client side. + IPSubnet []*string `json:"ip_subnet"` + + // HTTPFilter: type of HTTP filter to match. Extracts the request's URL path, which starts at the first slash and ends before the question mark (without the host part). Defines where to filter for the http_filter_value. Only supported for HTTP backends. + // Default value: acl_http_filter_none + HTTPFilter ACLHTTPFilter `json:"http_filter"` + + // HTTPFilterValue: list of values to filter for. + HTTPFilterValue []*string `json:"http_filter_value"` + + // HTTPFilterOption: name of the HTTP header to filter on if `http_header_match` was selected in `http_filter`. + HTTPFilterOption *string `json:"http_filter_option"` + + // Invert: defines whether to invert the match condition. If set to `true`, the ACL carries out its action when the condition DOES NOT match. + Invert bool `json:"invert"` } // Frontend: frontend. type Frontend struct { // ID: frontend ID. ID string `json:"id"` + // Name: name of the frontend. Name string `json:"name"` + // InboundPort: port the frontend listens on. InboundPort int32 `json:"inbound_port"` + // Backend: backend object the frontend is attached to. Backend *Backend `json:"backend"` + // LB: load Balancer object the frontend is attached to. LB *LB `json:"lb"` + // TimeoutClient: maximum allowed inactivity time on the client side. 
- // Default value: 300000 TimeoutClient *time.Duration `json:"timeout_client"` + // Deprecated: Certificate: certificate, deprecated in favor of certificate_ids array. Certificate *Certificate `json:"certificate,omitempty"` + // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. CertificateIDs []string `json:"certificate_ids"` + // CreatedAt: date on which the frontend was created. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date on which the frontend was last updated. UpdatedAt *time.Time `json:"updated_at"` + // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. EnableHTTP3 bool `json:"enable_http3"` } @@ -1132,7 +1366,6 @@ func (m *Frontend) UnmarshalJSON(b []byte) error { type tmpType Frontend tmp := struct { tmpType - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` }{} err := json.Unmarshal(b, &tmp) @@ -1141,7 +1374,6 @@ func (m *Frontend) UnmarshalJSON(b []byte) error { } *m = Frontend(tmp.tmpType) - m.TimeoutClient = tmp.TmpTimeoutClient.Standard() return nil } @@ -1150,231 +1382,705 @@ func (m Frontend) MarshalJSON() ([]byte, error) { type tmpType Frontend tmp := struct { tmpType - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` }{ - tmpType: tmpType(m), - + tmpType: tmpType(m), TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), } return json.Marshal(tmp) } -// HealthCheck: health check. -type HealthCheck struct { - // Port: port to use for the backend server health check. - Port int32 `json:"port"` - // CheckDelay: time to wait between two consecutive health checks. - // Default value: 3000 - CheckDelay *time.Duration `json:"check_delay"` - // CheckTimeout: maximum time a backend server has to reply to the health check. - // Default value: 1000 - CheckTimeout *time.Duration `json:"check_timeout"` - // CheckMaxRetries: number of consecutive unsuccessful health checks after which the server will be considered dead. - CheckMaxRetries int32 `json:"check_max_retries"` - // TCPConfig: object to configure a basic TCP health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - TCPConfig *HealthCheckTCPConfig `json:"tcp_config,omitempty"` - // MysqlConfig: object to configure a MySQL health check. The check requires MySQL >=3.22, for older versions, use a TCP health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - MysqlConfig *HealthCheckMysqlConfig `json:"mysql_config,omitempty"` - // PgsqlConfig: object to configure a PostgreSQL health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - PgsqlConfig *HealthCheckPgsqlConfig `json:"pgsql_config,omitempty"` - // LdapConfig: object to configure an LDAP health check. The response is analyzed to find the LDAPv3 response message. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - LdapConfig *HealthCheckLdapConfig `json:"ldap_config,omitempty"` - // RedisConfig: object to configure a Redis health check. The response is analyzed to find the +PONG response message. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - RedisConfig *HealthCheckRedisConfig `json:"redis_config,omitempty"` - // HTTPConfig: object to configure an HTTP health check. 
- // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - HTTPConfig *HealthCheckHTTPConfig `json:"http_config,omitempty"` - // HTTPSConfig: object to configure an HTTPS health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - HTTPSConfig *HealthCheckHTTPSConfig `json:"https_config,omitempty"` - // CheckSendProxy: defines whether proxy protocol should be activated for the health check. - CheckSendProxy bool `json:"check_send_proxy"` - // TransientCheckDelay: time to wait between two consecutive health checks when a backend server is in a transient state (going UP or DOWN). - // Default value: 0.5s - TransientCheckDelay *scw.Duration `json:"transient_check_delay"` +// PrivateNetworkDHCPConfig: private network dhcp config. +type PrivateNetworkDHCPConfig struct { + // Deprecated + IPID *string `json:"ip_id,omitempty"` } -func (m *HealthCheck) UnmarshalJSON(b []byte) error { - type tmpType HealthCheck +// PrivateNetworkIpamConfig: private network ipam config. +type PrivateNetworkIpamConfig struct { +} + +// PrivateNetworkStaticConfig: private network static config. +type PrivateNetworkStaticConfig struct { + // Deprecated: IPAddress: array of a local IP address for the Load Balancer on this Private Network. + IPAddress *[]string `json:"ip_address,omitempty"` +} + +// RouteMatch: route match. +type RouteMatch struct { + // Sni: value to match in the Server Name Indication TLS extension (SNI) field from an incoming connection made via an SSL/TLS transport layer. This field should be set for routes on TCP Load Balancers. + // Precisely one of Sni, HostHeader must be set. + Sni *string `json:"sni,omitempty"` + + // HostHeader: value to match in the HTTP Host request header from an incoming connection. This field should be set for routes on HTTP Load Balancers. + // Precisely one of Sni, HostHeader must be set. + HostHeader *string `json:"host_header,omitempty"` +} + +// CreateCertificateRequestCustomCertificate: create certificate request custom certificate. +type CreateCertificateRequestCustomCertificate struct { + // CertificateChain: full PEM-formatted certificate, consisting of the entire certificate chain including public key, private key, and (optionally) Certificate Authorities. + CertificateChain string `json:"certificate_chain"` +} + +// CreateCertificateRequestLetsencryptConfig: create certificate request letsencrypt config. +type CreateCertificateRequestLetsencryptConfig struct { + // CommonName: main domain name of certificate (this domain must exist and resolve to your Load Balancer IP address). + CommonName string `json:"common_name"` + + // SubjectAlternativeName: alternative domain names (all domain names must exist and resolve to your Load Balancer IP address). + SubjectAlternativeName []string `json:"subject_alternative_name"` +} + +// BackendServerStats: backend server stats. +type BackendServerStats struct { + // InstanceID: ID of your Load Balancer's underlying Instance. + InstanceID string `json:"instance_id"` + + // BackendID: backend ID. + BackendID string `json:"backend_id"` + + // IP: iPv4 or IPv6 address of the backend server. + IP string `json:"ip"` + + // ServerState: server operational state (stopped/starting/running/stopping). + // Default value: stopped + ServerState BackendServerStatsServerState `json:"server_state"` + + // ServerStateChangedAt: time since last operational change. 
+ ServerStateChangedAt *time.Time `json:"server_state_changed_at"` + + // LastHealthCheckStatus: last health check status (unknown/neutral/failed/passed/condpass). + // Default value: unknown + LastHealthCheckStatus BackendServerStatsHealthCheckStatus `json:"last_health_check_status"` +} + +// ACL: acl. +type ACL struct { + // ID: ACL ID. + ID string `json:"id"` + + // Name: ACL name. + Name string `json:"name"` + + // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. + Match *ACLMatch `json:"match"` + + // Action: action to take when incoming traffic matches an ACL filter. + Action *ACLAction `json:"action"` + + // Frontend: ACL is attached to this frontend object. + Frontend *Frontend `json:"frontend"` + + // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). + Index int32 `json:"index"` + + // CreatedAt: date on which the ACL was created. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: date on which the ACL was last updated. + UpdatedAt *time.Time `json:"updated_at"` + + // Description: ACL description. + Description string `json:"description"` +} + +// PrivateNetwork: private network. +type PrivateNetwork struct { + // LB: load Balancer object which is attached to the Private Network. + LB *LB `json:"lb"` + + // IpamIDs: iPAM IDs of the booked IP addresses. + IpamIDs []string `json:"ipam_ids"` + + // Deprecated: StaticConfig: object containing an array of a local IP address for the Load Balancer on this Private Network. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + StaticConfig *PrivateNetworkStaticConfig `json:"static_config,omitempty"` + + // Deprecated: DHCPConfig: object containing DHCP-assigned IP addresses. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + DHCPConfig *PrivateNetworkDHCPConfig `json:"dhcp_config,omitempty"` + + // Deprecated: IpamConfig: for internal use only. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + IpamConfig *PrivateNetworkIpamConfig `json:"ipam_config,omitempty"` + + // PrivateNetworkID: private Network ID. + PrivateNetworkID string `json:"private_network_id"` + + // Status: status of Private Network connection. + // Default value: unknown + Status PrivateNetworkStatus `json:"status"` + + // CreatedAt: date on which the Private Network was created. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: date on which the PN was last updated. + UpdatedAt *time.Time `json:"updated_at"` +} + +// LBType: lb type. +type LBType struct { + // Name: load Balancer commercial offer type name. + Name string `json:"name"` + + // StockStatus: current stock status for a given Load Balancer type. + // Default value: unknown + StockStatus LBTypeStock `json:"stock_status"` + + // Description: load Balancer commercial offer type description. + Description string `json:"description"` + + // Deprecated: Region: the region the Load Balancer stock is in. + Region *scw.Region `json:"region,omitempty"` + + // Zone: the zone the Load Balancer stock is in. + Zone scw.Zone `json:"zone"` +} + +// Route: route. +type Route struct { + // ID: route ID. + ID string `json:"id"` + + // FrontendID: ID of the source frontend. + FrontendID string `json:"frontend_id"` + + // BackendID: ID of the target backend. + BackendID string `json:"backend_id"` + + // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. 
it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. + Match *RouteMatch `json:"match"` + + // CreatedAt: date on which the route was created. + CreatedAt *time.Time `json:"created_at"` + + // UpdatedAt: date on which the route was last updated. + UpdatedAt *time.Time `json:"updated_at"` +} + +// ACLSpec: acl spec. +type ACLSpec struct { + // Name: ACL name. + Name string `json:"name"` + + // Action: action to take when incoming traffic matches an ACL filter. + Action *ACLAction `json:"action"` + + // Match: ACL match filter object. One of `ip_subnet` or `http_filter` and `http_filter_value` are required. + Match *ACLMatch `json:"match"` + + // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). + Index int32 `json:"index"` + + // Description: ACL description. + Description string `json:"description"` +} + +// AddBackendServersRequest: add backend servers request. +type AddBackendServersRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // ServerIP: list of IP addresses to add to backend servers. + ServerIP []string `json:"server_ip"` +} + +// AttachPrivateNetworkRequest: attach private network request. +type AttachPrivateNetworkRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // PrivateNetworkID: private Network ID. + PrivateNetworkID string `json:"-"` + + // Deprecated: StaticConfig: object containing an array of a local IP address for the Load Balancer on this Private Network. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + StaticConfig *PrivateNetworkStaticConfig `json:"static_config,omitempty"` + + // Deprecated: DHCPConfig: defines whether to let DHCP assign IP addresses. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + DHCPConfig *PrivateNetworkDHCPConfig `json:"dhcp_config,omitempty"` + + // Deprecated: IpamConfig: for internal use only. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + IpamConfig *PrivateNetworkIpamConfig `json:"ipam_config,omitempty"` +} + +// CreateACLRequest: Add an ACL to a Load Balancer frontend. +type CreateACLRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // FrontendID: frontend ID to attach the ACL to. + FrontendID string `json:"-"` + + // Name: ACL name. + Name string `json:"name"` + + // Action: action to take when incoming traffic matches an ACL filter. + Action *ACLAction `json:"action"` + + // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. + Match *ACLMatch `json:"match,omitempty"` + + // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). + Index int32 `json:"index"` + + // Description: ACL description. + Description string `json:"description"` +} + +// CreateBackendRequest: create backend request. +type CreateBackendRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: name for the backend. 
+ Name string `json:"name"` + + // ForwardProtocol: protocol to be used by the backend when forwarding traffic to backend servers. + // Default value: tcp + ForwardProtocol Protocol `json:"forward_protocol"` + + // ForwardPort: port to be used by the backend when forwarding traffic to backend servers. + ForwardPort int32 `json:"forward_port"` + + // ForwardPortAlgorithm: load balancing algorithm to be used when determining which backend server to forward new traffic to. + // Default value: roundrobin + ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` + + // StickySessions: defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie TO stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. + // Default value: none + StickySessions StickySessionsType `json:"sticky_sessions"` + + // StickySessionsCookieName: cookie name for cookie-based sticky sessions. + StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` + + // HealthCheck: object defining the health check to be carried out by the backend when checking the status and health of backend servers. + HealthCheck *HealthCheck `json:"health_check"` + + // ServerIP: list of backend server IP addresses (IPv4 or IPv6) the backend should forward traffic to. + ServerIP []string `json:"server_ip"` + + // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. + SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` + + // TimeoutServer: maximum allowed time for a backend server to process a request. + TimeoutServer *time.Duration `json:"timeout_server,omitempty"` + + // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. + TimeoutConnect *time.Duration `json:"timeout_connect,omitempty"` + + // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). + TimeoutTunnel *time.Duration `json:"timeout_tunnel,omitempty"` + + // OnMarkedDownAction: action to take when a backend server is marked as down. + // Default value: on_marked_down_action_none + OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` + + // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. The PROXY protocol must be supported by the backend servers' software. + // Default value: proxy_protocol_unknown + ProxyProtocol ProxyProtocol `json:"proxy_protocol"` + + // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. + FailoverHost *string `json:"failover_host,omitempty"` + + // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. + SslBridging *bool `json:"ssl_bridging,omitempty"` + + // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. + IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify,omitempty"` + + // RedispatchAttemptCount: whether to use another backend server on each attempt. + RedispatchAttemptCount *int32 `json:"redispatch_attempt_count,omitempty"` + + // MaxRetries: number of retries when a backend server connection failed. 
+ MaxRetries *int32 `json:"max_retries,omitempty"` + + // MaxConnections: maximum number of connections allowed per backend server. + MaxConnections *int32 `json:"max_connections,omitempty"` + + // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. + TimeoutQueue *scw.Duration `json:"timeout_queue,omitempty"` +} + +func (m *CreateBackendRequest) UnmarshalJSON(b []byte) error { + type tmpType CreateBackendRequest tmp := struct { tmpType - - TmpCheckDelay *marshaler.Duration `json:"check_delay"` - TmpCheckTimeout *marshaler.Duration `json:"check_timeout"` + TmpTimeoutServer *marshaler.Duration `json:"timeout_server,omitempty"` + TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect,omitempty"` + TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel,omitempty"` }{} err := json.Unmarshal(b, &tmp) if err != nil { return err } - *m = HealthCheck(tmp.tmpType) - - m.CheckDelay = tmp.TmpCheckDelay.Standard() - m.CheckTimeout = tmp.TmpCheckTimeout.Standard() + *m = CreateBackendRequest(tmp.tmpType) + m.TimeoutServer = tmp.TmpTimeoutServer.Standard() + m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() + m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() return nil } -func (m HealthCheck) MarshalJSON() ([]byte, error) { - type tmpType HealthCheck +func (m CreateBackendRequest) MarshalJSON() ([]byte, error) { + type tmpType CreateBackendRequest tmp := struct { tmpType - - TmpCheckDelay *marshaler.Duration `json:"check_delay"` - TmpCheckTimeout *marshaler.Duration `json:"check_timeout"` + TmpTimeoutServer *marshaler.Duration `json:"timeout_server,omitempty"` + TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect,omitempty"` + TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel,omitempty"` }{ - tmpType: tmpType(m), - - TmpCheckDelay: marshaler.NewDuration(m.CheckDelay), - TmpCheckTimeout: marshaler.NewDuration(m.CheckTimeout), + tmpType: tmpType(m), + TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), + TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), + TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), } return json.Marshal(tmp) } -// HealthCheckHTTPConfig: health check. http config. -type HealthCheckHTTPConfig struct { - // URI: HTTP URI used for the health check. - // The HTTP URI to use when performing a health check on backend servers. - URI string `json:"uri"` - // Method: HTTP method used for the health check. - // The HTTP method used when performing a health check on backend servers. - Method string `json:"method"` - // Code: HTTP response code expected for a successful health check. - // The HTTP response code that should be returned for a health check to be considered successful. - Code *int32 `json:"code"` - // HostHeader: HTTP host header used for the health check. - // The HTTP host header used when performing a health check on backend servers. - HostHeader string `json:"host_header"` -} +// CreateCertificateRequest: create certificate request. +type CreateCertificateRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` -// HealthCheckHTTPSConfig: health check. https config. -type HealthCheckHTTPSConfig struct { - // URI: HTTP URI used for the health check. - // The HTTP URI to use when performing a health check on backend servers. - URI string `json:"uri"` - // Method: HTTP method used for the health check. - // The HTTP method used when performing a health check on backend servers. 
- Method string `json:"method"` - // Code: HTTP response code expected for a successful health check. - // The HTTP response code that should be returned for a health check to be considered successful. - Code *int32 `json:"code"` - // HostHeader: HTTP host header used for the health check. - // The HTTP host header used when performing a health check on backend servers. - HostHeader string `json:"host_header"` - // Sni: sNI used for SSL health checks. - // The SNI value used when performing a health check on backend servers over SSL. - Sni string `json:"sni"` -} - -type HealthCheckLdapConfig struct { -} - -// HealthCheckMysqlConfig: health check. mysql config. -type HealthCheckMysqlConfig struct { - // User: mySQL user to use for the health check. - User string `json:"user"` -} - -// HealthCheckPgsqlConfig: health check. pgsql config. -type HealthCheckPgsqlConfig struct { - // User: postgreSQL user to use for the health check. - User string `json:"user"` -} - -type HealthCheckRedisConfig struct { -} - -type HealthCheckTCPConfig struct { -} - -// IP: ip. -type IP struct { - // ID: IP address ID. - ID string `json:"id"` - // IPAddress: IP address. - IPAddress string `json:"ip_address"` - // OrganizationID: organization ID of the Scaleway Organization the IP address is in. - OrganizationID string `json:"organization_id"` - // ProjectID: project ID of the Scaleway Project the IP address is in. - ProjectID string `json:"project_id"` // LBID: load Balancer ID. - LBID *string `json:"lb_id"` - // Reverse: reverse DNS (domain name) of the IP address. - Reverse string `json:"reverse"` - // Deprecated: Region: the region the IP address is in. - Region *scw.Region `json:"region,omitempty"` - // Zone: the zone the IP address is in. - Zone scw.Zone `json:"zone"` -} + LBID string `json:"-"` -// Instance: instance. -type Instance struct { - // ID: underlying Instance ID. - ID string `json:"id"` - // Status: instance status. - // Default value: unknown - Status InstanceStatus `json:"status"` - // IPAddress: instance IP address. - IPAddress string `json:"ip_address"` - // CreatedAt: date on which the Instance was created. - CreatedAt *time.Time `json:"created_at"` - // UpdatedAt: date on which the Instance was last updated. - UpdatedAt *time.Time `json:"updated_at"` - // Deprecated: Region: the region the Instance is in. - Region *scw.Region `json:"region,omitempty"` - // Zone: the zone the Instance is in. - Zone scw.Zone `json:"zone"` -} - -// LB: lb. -type LB struct { - // ID: underlying Instance ID. - ID string `json:"id"` - // Name: load Balancer name. + // Name: name for the certificate. Name string `json:"name"` - // Description: load Balancer description. + + // Letsencrypt: object to define a new Let's Encrypt certificate to be generated. + // Precisely one of Letsencrypt, CustomCertificate must be set. + Letsencrypt *CreateCertificateRequestLetsencryptConfig `json:"letsencrypt,omitempty"` + + // CustomCertificate: object to define an existing custom certificate to be imported. + // Precisely one of Letsencrypt, CustomCertificate must be set. + CustomCertificate *CreateCertificateRequestCustomCertificate `json:"custom_certificate,omitempty"` +} + +// CreateFrontendRequest: create frontend request. +type CreateFrontendRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID (ID of the Load Balancer to attach the frontend to). + LBID string `json:"-"` + + // Name: name for the frontend. 
+ Name string `json:"name"` + + // InboundPort: port the frontend should listen on. + InboundPort int32 `json:"inbound_port"` + + // BackendID: backend ID (ID of the backend the frontend should pass traffic to). + BackendID string `json:"backend_id"` + + // TimeoutClient: maximum allowed inactivity time on the client side. + TimeoutClient *time.Duration `json:"timeout_client,omitempty"` + + // Deprecated: CertificateID: certificate ID, deprecated in favor of certificate_ids array. + CertificateID *string `json:"certificate_id,omitempty"` + + // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. + CertificateIDs *[]string `json:"certificate_ids,omitempty"` + + // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. + EnableHTTP3 bool `json:"enable_http3"` +} + +func (m *CreateFrontendRequest) UnmarshalJSON(b []byte) error { + type tmpType CreateFrontendRequest + tmp := struct { + tmpType + TmpTimeoutClient *marshaler.Duration `json:"timeout_client,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = CreateFrontendRequest(tmp.tmpType) + m.TimeoutClient = tmp.TmpTimeoutClient.Standard() + return nil +} + +func (m CreateFrontendRequest) MarshalJSON() ([]byte, error) { + type tmpType CreateFrontendRequest + tmp := struct { + tmpType + TmpTimeoutClient *marshaler.Duration `json:"timeout_client,omitempty"` + }{ + tmpType: tmpType(m), + TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), + } + return json.Marshal(tmp) +} + +// CreateIPRequest: create ip request. +type CreateIPRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // Deprecated: OrganizationID: organization ID of the Organization where the IP address should be created. + // Precisely one of ProjectID, OrganizationID must be set. + OrganizationID *string `json:"organization_id,omitempty"` + + // ProjectID: project ID of the Project where the IP address should be created. + // Precisely one of ProjectID, OrganizationID must be set. + ProjectID *string `json:"project_id,omitempty"` + + // Reverse: reverse DNS (domain name) for the IP address. + Reverse *string `json:"reverse,omitempty"` +} + +// CreateLBRequest: create lb request. +type CreateLBRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // Deprecated: OrganizationID: scaleway Organization to create the Load Balancer in. + // Precisely one of ProjectID, OrganizationID must be set. + OrganizationID *string `json:"organization_id,omitempty"` + + // ProjectID: scaleway Project to create the Load Balancer in. + // Precisely one of ProjectID, OrganizationID must be set. + ProjectID *string `json:"project_id,omitempty"` + + // Name: name for the Load Balancer. + Name string `json:"name"` + + // Description: description for the Load Balancer. Description string `json:"description"` - // Status: load Balancer status. - // Default value: unknown - Status LBStatus `json:"status"` - // Instances: list of underlying Instances. - Instances []*Instance `json:"instances"` - // OrganizationID: scaleway Organization ID. - OrganizationID string `json:"organization_id"` - // ProjectID: scaleway Project ID. - ProjectID string `json:"project_id"` - // IP: list of IP addresses attached to the Load Balancer. - IP []*IP `json:"ip"` - // Tags: load Balancer tags. 
+ + // Deprecated: IPID: ID of an existing flexible IP address to attach to the Load Balancer. + IPID *string `json:"ip_id,omitempty"` + + // AssignFlexibleIP: defines whether to automatically assign a flexible public IP to lb. Default value is `false` (do not assign). + AssignFlexibleIP *bool `json:"assign_flexible_ip,omitempty"` + + // Tags: list of tags for the Load Balancer. Tags []string `json:"tags"` - // FrontendCount: number of frontends the Load Balancer has. - FrontendCount int32 `json:"frontend_count"` - // BackendCount: number of backends the Load Balancer has. - BackendCount int32 `json:"backend_count"` - // Type: load Balancer offer type. + + // Type: load Balancer commercial offer type. Use the Load Balancer types endpoint to retrieve a list of available offer types. Type string `json:"type"` - // Subscriber: subscriber information. - Subscriber *Subscriber `json:"subscriber"` - // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on client side. + + // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and do not need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort. // Default value: ssl_compatibility_level_unknown SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` - // CreatedAt: date on which the Load Balancer was created. - CreatedAt *time.Time `json:"created_at"` - // UpdatedAt: date on which the Load Balancer was last updated. - UpdatedAt *time.Time `json:"updated_at"` - // PrivateNetworkCount: number of Private Networks attached to the Load Balancer. - PrivateNetworkCount int32 `json:"private_network_count"` - // RouteCount: number of routes configured on the Load Balancer. - RouteCount int32 `json:"route_count"` - // Deprecated: Region: the region the Load Balancer is in. - Region *scw.Region `json:"region,omitempty"` - // Zone: the zone the Load Balancer is in. - Zone scw.Zone `json:"zone"` +} + +// CreateRouteRequest: create route request. +type CreateRouteRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // FrontendID: ID of the source frontend to create the route on. + FrontendID string `json:"frontend_id"` + + // BackendID: ID of the target backend for the route. + BackendID string `json:"backend_id"` + + // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. + Match *RouteMatch `json:"match,omitempty"` +} + +// CreateSubscriberRequest: Create a new alert subscriber (webhook or email). +type CreateSubscriberRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // Name: subscriber name. + Name string `json:"name"` + + // EmailConfig: email address configuration. + // Precisely one of EmailConfig, WebhookConfig must be set. + EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` + + // WebhookConfig: webHook URI configuration. + // Precisely one of EmailConfig, WebhookConfig must be set. 
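A usage note on CreateLBRequest above: exactly one of ProjectID or OrganizationID may be set, and AssignFlexibleIP replaces the deprecated IPID for obtaining a public address. A hedged sketch, assuming the regional constructor lb.NewAPI, its CreateLB method and the scw.StringPtr/scw.BoolPtr helpers follow the SDK's usual conventions (credentials, IDs and the offer type are placeholder values):

package main

import (
	"fmt"

	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"), // placeholder credentials
		scw.WithDefaultRegion(scw.RegionFrPar),
	)
	if err != nil {
		panic(err)
	}

	api := lb.NewAPI(client)
	res, err := api.CreateLB(&lb.CreateLBRequest{
		ProjectID:        scw.StringPtr("11111111-2222-3333-4444-555555555555"), // placeholder project ID
		Name:             "demo-lb",
		Type:             "LB-S", // example offer type; list available types via the LB types endpoint
		AssignFlexibleIP: scw.BoolPtr(true),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created load balancer:", res.ID)
}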
+ WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` + + // Deprecated: OrganizationID: organization ID to create the subscriber in. + // Precisely one of ProjectID, OrganizationID must be set. + OrganizationID *string `json:"organization_id,omitempty"` + + // ProjectID: project ID to create the subscriber in. + // Precisely one of ProjectID, OrganizationID must be set. + ProjectID *string `json:"project_id,omitempty"` +} + +// DeleteACLRequest: delete acl request. +type DeleteACLRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // ACLID: ACL ID. + ACLID string `json:"-"` +} + +// DeleteBackendRequest: delete backend request. +type DeleteBackendRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // BackendID: ID of the backend to delete. + BackendID string `json:"-"` +} + +// DeleteCertificateRequest: delete certificate request. +type DeleteCertificateRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // CertificateID: certificate ID. + CertificateID string `json:"-"` +} + +// DeleteFrontendRequest: delete frontend request. +type DeleteFrontendRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // FrontendID: ID of the frontend to delete. + FrontendID string `json:"-"` +} + +// DeleteLBRequest: delete lb request. +type DeleteLBRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: ID of the Load Balancer to delete. + LBID string `json:"-"` + + // ReleaseIP: defines whether the Load Balancer's flexible IP should be deleted. Set to true to release the flexible IP, or false to keep it available in your account for future Load Balancers. + ReleaseIP bool `json:"release_ip"` +} + +// DeleteRouteRequest: delete route request. +type DeleteRouteRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // RouteID: route ID. + RouteID string `json:"-"` +} + +// DeleteSubscriberRequest: delete subscriber request. +type DeleteSubscriberRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // SubscriberID: subscriber ID. + SubscriberID string `json:"-"` +} + +// DetachPrivateNetworkRequest: detach private network request. +type DetachPrivateNetworkRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load balancer ID. + LBID string `json:"-"` + + // PrivateNetworkID: set your instance private network id. + PrivateNetworkID string `json:"-"` +} + +// GetACLRequest: get acl request. +type GetACLRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // ACLID: ACL ID. + ACLID string `json:"-"` +} + +// GetBackendRequest: get backend request. +type GetBackendRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // BackendID: backend ID. 
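CreateRouteRequest's Match field is a one-of: a route matches either an SNI value (TCP Load Balancers) or an HTTP Host header (HTTP Load Balancers), never both. A small sketch of building such a request, assuming RouteMatch keeps the Sni/HostHeader pointer fields documented on the definition it replaces further down; the IDs are placeholders and the request is only marshalled, not sent:

package main

import (
	"encoding/json"
	"fmt"

	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	req := &lb.CreateRouteRequest{
		FrontendID: "example-frontend-id", // placeholder
		BackendID:  "example-backend-id",  // placeholder
		Match: &lb.RouteMatch{
			// HTTP Load Balancer case: match on the Host header, leave Sni nil.
			HostHeader: scw.StringPtr("app.example.com"),
		},
	}

	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}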
+ BackendID string `json:"-"` +} + +// GetCertificateRequest: get certificate request. +type GetCertificateRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // CertificateID: certificate ID. + CertificateID string `json:"-"` +} + +// GetFrontendRequest: get frontend request. +type GetFrontendRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // FrontendID: frontend ID. + FrontendID string `json:"-"` +} + +// GetIPRequest: get ip request. +type GetIPRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // IPID: IP address ID. + IPID string `json:"-"` +} + +// GetLBRequest: get lb request. +type GetLBRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` +} + +// GetLBStatsRequest: Get Load Balancer stats. +type GetLBStatsRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // BackendID: ID of the backend. + BackendID *string `json:"backend_id,omitempty"` +} + +// GetRouteRequest: get route request. +type GetRouteRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // RouteID: route ID. + RouteID string `json:"-"` +} + +// GetSubscriberRequest: get subscriber request. +type GetSubscriberRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // SubscriberID: subscriber ID. + SubscriberID string `json:"-"` } // LBStats: lb stats. @@ -1383,236 +2089,2176 @@ type LBStats struct { BackendServersStats []*BackendServerStats `json:"backend_servers_stats"` } -// LBType: lb type. -type LBType struct { - // Name: load Balancer commercial offer type name. - Name string `json:"name"` - // StockStatus: current stock status for a given Load Balancer type. - // Default value: unknown - StockStatus LBTypeStock `json:"stock_status"` - // Description: load Balancer commercial offer type description. - Description string `json:"description"` - // Deprecated: Region: the region the Load Balancer stock is in. - Region *scw.Region `json:"region,omitempty"` - // Zone: the zone the Load Balancer stock is in. - Zone scw.Zone `json:"zone"` -} - // ListACLResponse: list acl response. type ListACLResponse struct { // ACLs: list of ACL objects. ACLs []*ACL `json:"acls"` + // TotalCount: the total number of objects. TotalCount uint32 `json:"total_count"` } +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListACLResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListACLResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListACLResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.ACLs = append(r.ACLs, results.ACLs...) + r.TotalCount += uint32(len(results.ACLs)) + return uint32(len(results.ACLs)), nil +} + +// ListACLsRequest: list ac ls request. +type ListACLsRequest struct { + // Region: region to target. 
If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // FrontendID: frontend ID (ACLs attached to this frontend will be returned in the response). + FrontendID string `json:"-"` + + // OrderBy: sort order of ACLs in the response. + // Default value: created_at_asc + OrderBy ListACLRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: the number of ACLs to return. + PageSize *uint32 `json:"-"` + + // Name: ACL name to filter for. + Name *string `json:"-"` +} + +// ListBackendStatsRequest: list backend stats request. +type ListBackendStatsRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of items to return. + PageSize *uint32 `json:"-"` + + // BackendID: ID of the backend. + BackendID *string `json:"-"` +} + // ListBackendStatsResponse: list backend stats response. type ListBackendStatsResponse struct { // BackendServersStats: list of objects containing backend server statistics. BackendServersStats []*BackendServerStats `json:"backend_servers_stats"` + // TotalCount: the total number of objects. TotalCount uint32 `json:"total_count"` } +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListBackendStatsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListBackendStatsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListBackendStatsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.BackendServersStats = append(r.BackendServersStats, results.BackendServersStats...) + r.TotalCount += uint32(len(results.BackendServersStats)) + return uint32(len(results.BackendServersStats)), nil +} + +// ListBackendsRequest: list backends request. +type ListBackendsRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: name of the backend to filter for. + Name *string `json:"-"` + + // OrderBy: sort order of backends in the response. + // Default value: created_at_asc + OrderBy ListBackendsRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of backends to return. + PageSize *uint32 `json:"-"` +} + // ListBackendsResponse: list backends response. type ListBackendsResponse struct { // Backends: list of backend objects of a given Load Balancer. Backends []*Backend `json:"backends"` + // TotalCount: total count of backend objects, without pagination. TotalCount uint32 `json:"total_count"` } +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListBackendsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListBackendsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListBackendsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Backends = append(r.Backends, results.Backends...) 
+ r.TotalCount += uint32(len(results.Backends)) + return uint32(len(results.Backends)), nil +} + +// ListCertificatesRequest: list certificates request. +type ListCertificatesRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // OrderBy: sort order of certificates in the response. + // Default value: created_at_asc + OrderBy ListCertificatesRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of certificates to return. + PageSize *uint32 `json:"-"` + + // Name: certificate name to filter for, only certificates of this name will be returned. + Name *string `json:"-"` +} + // ListCertificatesResponse: list certificates response. type ListCertificatesResponse struct { // Certificates: list of certificate objects. Certificates []*Certificate `json:"certificates"` + // TotalCount: the total number of objects. TotalCount uint32 `json:"total_count"` } +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListCertificatesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListCertificatesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListCertificatesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Certificates = append(r.Certificates, results.Certificates...) + r.TotalCount += uint32(len(results.Certificates)) + return uint32(len(results.Certificates)), nil +} + +// ListFrontendsRequest: list frontends request. +type ListFrontendsRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: name of the frontend to filter for. + Name *string `json:"-"` + + // OrderBy: sort order of frontends in the response. + // Default value: created_at_asc + OrderBy ListFrontendsRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of frontends to return. + PageSize *uint32 `json:"-"` +} + // ListFrontendsResponse: list frontends response. type ListFrontendsResponse struct { // Frontends: list of frontend objects of a given Load Balancer. Frontends []*Frontend `json:"frontends"` + // TotalCount: total count of frontend objects, without pagination. TotalCount uint32 `json:"total_count"` } -// ListIPsResponse: list ips response. +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListFrontendsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListFrontendsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListFrontendsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Frontends = append(r.Frontends, results.Frontends...) + r.TotalCount += uint32(len(results.Frontends)) + return uint32(len(results.Frontends)), nil +} + +// ListIPsRequest: list i ps request. +type ListIPsRequest struct { + // Region: region to target. If none is passed will use default region from the config. 
+ Region scw.Region `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of IP addresses to return. + PageSize *uint32 `json:"-"` + + // IPAddress: IP address to filter for. + IPAddress *string `json:"-"` + + // OrganizationID: organization ID to filter for, only Load Balancer IP addresses from this Organization will be returned. + OrganizationID *string `json:"-"` + + // ProjectID: project ID to filter for, only Load Balancer IP addresses from this Project will be returned. + ProjectID *string `json:"-"` +} + +// ListIPsResponse: list i ps response. type ListIPsResponse struct { // IPs: list of IP address objects. IPs []*IP `json:"ips"` + // TotalCount: total count of IP address objects, without pagination. TotalCount uint32 `json:"total_count"` } +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListIPsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListIPsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListIPsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.IPs = append(r.IPs, results.IPs...) + r.TotalCount += uint32(len(results.IPs)) + return uint32(len(results.IPs)), nil +} + +// ListLBPrivateNetworksRequest: list lb private networks request. +type ListLBPrivateNetworksRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // OrderBy: sort order of Private Network objects in the response. + // Default value: created_at_asc + OrderBy ListPrivateNetworksRequestOrderBy `json:"-"` + + // PageSize: number of objects to return. + PageSize *uint32 `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` +} + // ListLBPrivateNetworksResponse: list lb private networks response. type ListLBPrivateNetworksResponse struct { // PrivateNetwork: list of Private Network objects attached to the Load Balancer. PrivateNetwork []*PrivateNetwork `json:"private_network"` + // TotalCount: total number of objects in the response. TotalCount uint32 `json:"total_count"` } +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListLBPrivateNetworksResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListLBPrivateNetworksResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListLBPrivateNetworksResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.PrivateNetwork = append(r.PrivateNetwork, results.PrivateNetwork...) + r.TotalCount += uint32(len(results.PrivateNetwork)) + return uint32(len(results.PrivateNetwork)), nil +} + +// ListLBTypesRequest: list lb types request. +type ListLBTypesRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: the number of items to return. + PageSize *uint32 `json:"-"` +} + // ListLBTypesResponse: list lb types response. type ListLBTypesResponse struct { // LBTypes: list of Load Balancer commercial offer type objects. 
LBTypes []*LBType `json:"lb_types"` + // TotalCount: total number of Load Balancer offer type objects. TotalCount uint32 `json:"total_count"` } -// ListLBsResponse: list lbs response. +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListLBTypesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListLBTypesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListLBTypesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.LBTypes = append(r.LBTypes, results.LBTypes...) + r.TotalCount += uint32(len(results.LBTypes)) + return uint32(len(results.LBTypes)), nil +} + +// ListLBsRequest: list l bs request. +type ListLBsRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // Name: load Balancer name to filter for. + Name *string `json:"-"` + + // OrderBy: sort order of Load Balancers in the response. + // Default value: created_at_asc + OrderBy ListLBsRequestOrderBy `json:"-"` + + // PageSize: number of Load Balancers to return. + PageSize *uint32 `json:"-"` + + // Page: page number to return, from the paginated results. + Page *int32 `json:"-"` + + // OrganizationID: organization ID to filter for, only Load Balancers from this Organization will be returned. + OrganizationID *string `json:"-"` + + // ProjectID: project ID to filter for, only Load Balancers from this Project will be returned. + ProjectID *string `json:"-"` +} + +// ListLBsResponse: list l bs response. type ListLBsResponse struct { // LBs: list of Load Balancer objects. LBs []*LB `json:"lbs"` + // TotalCount: the total number of Load Balancer objects. TotalCount uint32 `json:"total_count"` } +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListLBsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListLBsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListLBsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.LBs = append(r.LBs, results.LBs...) + r.TotalCount += uint32(len(results.LBs)) + return uint32(len(results.LBs)), nil +} + +// ListRoutesRequest: list routes request. +type ListRoutesRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // OrderBy: sort order of routes in the response. + // Default value: created_at_asc + OrderBy ListRoutesRequestOrderBy `json:"-"` + + // PageSize: the number of route objects to return. + PageSize *uint32 `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // FrontendID: frontend ID to filter for, only Routes from this Frontend will be returned. + FrontendID *string `json:"-"` +} + // ListRoutesResponse: list routes response. type ListRoutesResponse struct { // Routes: list of route objects. Routes []*Route `json:"routes"` + // TotalCount: the total number of route objects. 
TotalCount uint32 `json:"total_count"` } +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListRoutesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *ListRoutesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListRoutesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Routes = append(r.Routes, results.Routes...) + r.TotalCount += uint32(len(results.Routes)) + return uint32(len(results.Routes)), nil +} + +// ListSubscriberRequest: list subscriber request. +type ListSubscriberRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // OrderBy: sort order of subscribers in the response. + // Default value: created_at_asc + OrderBy ListSubscriberRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: the number of items to return. + PageSize *uint32 `json:"-"` + + // Name: subscriber name to search for. + Name *string `json:"-"` + + // OrganizationID: filter subscribers by Organization ID. + OrganizationID *string `json:"-"` + + // ProjectID: filter subscribers by Project ID. + ProjectID *string `json:"-"` +} + // ListSubscriberResponse: list subscriber response. type ListSubscriberResponse struct { // Subscribers: list of subscriber objects. Subscribers []*Subscriber `json:"subscribers"` + // TotalCount: the total number of objects. TotalCount uint32 `json:"total_count"` } -// PrivateNetwork: private network. -type PrivateNetwork struct { - // LB: load Balancer object which is attached to the Private Network. - LB *LB `json:"lb"` - // Deprecated: StaticConfig: object containing an array of a local IP address for the Load Balancer on this Private Network. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. - StaticConfig *PrivateNetworkStaticConfig `json:"static_config,omitempty"` - // DHCPConfig: object containing DHCP-assigned IP addresses. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. - DHCPConfig *PrivateNetworkDHCPConfig `json:"dhcp_config,omitempty"` - // Deprecated: IpamConfig: for internal use only. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. - IpamConfig *PrivateNetworkIpamConfig `json:"ipam_config,omitempty"` - // PrivateNetworkID: private Network ID. - PrivateNetworkID string `json:"private_network_id"` - // Status: status of Private Network connection. - // Default value: unknown - Status PrivateNetworkStatus `json:"status"` - // CreatedAt: date on which the Private Network was created. - CreatedAt *time.Time `json:"created_at"` - // UpdatedAt: date on which the PN was last updated. - UpdatedAt *time.Time `json:"updated_at"` +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListSubscriberResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount } -type PrivateNetworkDHCPConfig struct { - IPID *string `json:"ip_id"` +// UnsafeAppend should not be used +// Internal usage only +func (r *ListSubscriberResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListSubscriberResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.Subscribers = append(r.Subscribers, results.Subscribers...) 
+ r.TotalCount += uint32(len(results.Subscribers)) + return uint32(len(results.Subscribers)), nil } -type PrivateNetworkIpamConfig struct { +// MigrateLBRequest: migrate lb request. +type MigrateLBRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Type: load Balancer type to migrate to (use the List all Load Balancer offer types endpoint to get a list of available offer types). + Type string `json:"type"` } -// PrivateNetworkStaticConfig: private network. static config. -type PrivateNetworkStaticConfig struct { - // Deprecated: IPAddress: array of a local IP address for the Load Balancer on this Private Network. - IPAddress *[]string `json:"ip_address,omitempty"` +// ReleaseIPRequest: release ip request. +type ReleaseIPRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // IPID: IP address ID. + IPID string `json:"-"` } -// Route: route. -type Route struct { - // ID: route ID. - ID string `json:"id"` - // FrontendID: ID of the source frontend. - FrontendID string `json:"frontend_id"` - // BackendID: ID of the target backend. - BackendID string `json:"backend_id"` - // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. - Match *RouteMatch `json:"match"` - // CreatedAt: date on which the route was created. - CreatedAt *time.Time `json:"created_at"` - // UpdatedAt: date on which the route was last updated. - UpdatedAt *time.Time `json:"updated_at"` +// RemoveBackendServersRequest: remove backend servers request. +type RemoveBackendServersRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // ServerIP: list of IP addresses to remove from backend servers. + ServerIP []string `json:"server_ip"` } -// RouteMatch: route. match. -type RouteMatch struct { - // Sni: server Name Indication (SNI) value to match. - // Value to match in the Server Name Indication TLS extension (SNI) field from an incoming connection made via an SSL/TLS transport layer. This field should be set for routes on TCP Load Balancers. - // Precisely one of HostHeader, Sni must be set. - Sni *string `json:"sni,omitempty"` - // HostHeader: HTTP host header to match. - // Value to match in the HTTP Host request header from an incoming connection. This field should be set for routes on HTTP Load Balancers. - // Precisely one of HostHeader, Sni must be set. - HostHeader *string `json:"host_header,omitempty"` -} - -// SetACLsResponse: set acls response. +// SetACLsResponse: set ac ls response. type SetACLsResponse struct { // ACLs: list of ACL objects. ACLs []*ACL `json:"acls"` + // TotalCount: the total number of ACL objects. TotalCount uint32 `json:"total_count"` } -// Subscriber: subscriber. -type Subscriber struct { - // ID: subscriber ID. 
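The UnsafeGetTotalCount and UnsafeAppend methods added to every List*Response above are not meant to be called by hand; they are the hooks the SDK's pager uses when a caller requests all pages at once. A hedged sketch of the caller side, assuming lb.NewAPI, ListLBs and scw.WithAllPages behave as elsewhere in the SDK:

package main

import (
	"fmt"

	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"), // placeholder credentials
		scw.WithDefaultRegion(scw.RegionFrPar),
	)
	if err != nil {
		panic(err)
	}

	api := lb.NewAPI(client)
	// WithAllPages fetches every page and merges them via UnsafeAppend,
	// so LBs and TotalCount cover the full result set.
	res, err := api.ListLBs(&lb.ListLBsRequest{}, scw.WithAllPages())
	if err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d of %d load balancers\n", len(res.LBs), res.TotalCount)
}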
- ID string `json:"id"` +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *SetACLsResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount +} + +// UnsafeAppend should not be used +// Internal usage only +func (r *SetACLsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*SetACLsResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) + } + + r.ACLs = append(r.ACLs, results.ACLs...) + r.TotalCount += uint32(len(results.ACLs)) + return uint32(len(results.ACLs)), nil +} + +// SetBackendServersRequest: set backend servers request. +type SetBackendServersRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // ServerIP: list of IP addresses for backend servers. Any other existing backend servers will be removed. + ServerIP []string `json:"server_ip"` +} + +// SubscribeToLBRequest: subscribe to lb request. +type SubscribeToLBRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // SubscriberID: subscriber ID. + SubscriberID string `json:"subscriber_id"` +} + +// UnsubscribeFromLBRequest: unsubscribe from lb request. +type UnsubscribeFromLBRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` +} + +// UpdateACLRequest: update acl request. +type UpdateACLRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // ACLID: ACL ID. + ACLID string `json:"-"` + + // Name: ACL name. + Name string `json:"name"` + + // Action: action to take when incoming traffic matches an ACL filter. + Action *ACLAction `json:"action"` + + // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. + Match *ACLMatch `json:"match,omitempty"` + + // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). + Index int32 `json:"index"` + + // Description: ACL description. + Description *string `json:"description,omitempty"` +} + +// UpdateBackendRequest: update backend request. +type UpdateBackendRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // Name: backend name. + Name string `json:"name"` + + // ForwardProtocol: protocol to be used by the backend when forwarding traffic to backend servers. + // Default value: tcp + ForwardProtocol Protocol `json:"forward_protocol"` + + // ForwardPort: port to be used by the backend when forwarding traffic to backend servers. + ForwardPort int32 `json:"forward_port"` + + // ForwardPortAlgorithm: load balancing algorithm to be used when determining which backend server to forward new traffic to. + // Default value: roundrobin + ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` + + // StickySessions: defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. 
Cookie-based uses an HTTP cookie to stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. + // Default value: none + StickySessions StickySessionsType `json:"sticky_sessions"` + + // StickySessionsCookieName: cookie name for cookie-based sticky sessions. + StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` + + // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. + SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` + + // TimeoutServer: maximum allowed time for a backend server to process a request. + TimeoutServer *time.Duration `json:"timeout_server,omitempty"` + + // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. + TimeoutConnect *time.Duration `json:"timeout_connect,omitempty"` + + // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). + TimeoutTunnel *time.Duration `json:"timeout_tunnel,omitempty"` + + // OnMarkedDownAction: action to take when a backend server is marked as down. + // Default value: on_marked_down_action_none + OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` + + // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. The PROXY protocol must be supported by the backend servers' software. + // Default value: proxy_protocol_unknown + ProxyProtocol ProxyProtocol `json:"proxy_protocol"` + + // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. + FailoverHost *string `json:"failover_host,omitempty"` + + // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. + SslBridging *bool `json:"ssl_bridging,omitempty"` + + // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. + IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify,omitempty"` + + // RedispatchAttemptCount: whether to use another backend server on each attempt. + RedispatchAttemptCount *int32 `json:"redispatch_attempt_count,omitempty"` + + // MaxRetries: number of retries when a backend server connection failed. + MaxRetries *int32 `json:"max_retries,omitempty"` + + // MaxConnections: maximum number of connections allowed per backend server. + MaxConnections *int32 `json:"max_connections,omitempty"` + + // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. 
+ TimeoutQueue *scw.Duration `json:"timeout_queue,omitempty"` +} + +func (m *UpdateBackendRequest) UnmarshalJSON(b []byte) error { + type tmpType UpdateBackendRequest + tmp := struct { + tmpType + TmpTimeoutServer *marshaler.Duration `json:"timeout_server,omitempty"` + TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect,omitempty"` + TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = UpdateBackendRequest(tmp.tmpType) + m.TimeoutServer = tmp.TmpTimeoutServer.Standard() + m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() + m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() + return nil +} + +func (m UpdateBackendRequest) MarshalJSON() ([]byte, error) { + type tmpType UpdateBackendRequest + tmp := struct { + tmpType + TmpTimeoutServer *marshaler.Duration `json:"timeout_server,omitempty"` + TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect,omitempty"` + TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel,omitempty"` + }{ + tmpType: tmpType(m), + TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), + TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), + TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), + } + return json.Marshal(tmp) +} + +// UpdateCertificateRequest: update certificate request. +type UpdateCertificateRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // CertificateID: certificate ID. + CertificateID string `json:"-"` + + // Name: certificate name. + Name string `json:"name"` +} + +// UpdateFrontendRequest: update frontend request. +type UpdateFrontendRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // FrontendID: frontend ID. + FrontendID string `json:"-"` + + // Name: frontend name. + Name string `json:"name"` + + // InboundPort: port the frontend should listen on. + InboundPort int32 `json:"inbound_port"` + + // BackendID: backend ID (ID of the backend the frontend should pass traffic to). + BackendID string `json:"backend_id"` + + // TimeoutClient: maximum allowed inactivity time on the client side. + TimeoutClient *time.Duration `json:"timeout_client,omitempty"` + + // Deprecated: CertificateID: certificate ID, deprecated in favor of certificate_ids array. + CertificateID *string `json:"certificate_id,omitempty"` + + // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. + CertificateIDs *[]string `json:"certificate_ids,omitempty"` + + // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. + EnableHTTP3 bool `json:"enable_http3"` +} + +func (m *UpdateFrontendRequest) UnmarshalJSON(b []byte) error { + type tmpType UpdateFrontendRequest + tmp := struct { + tmpType + TmpTimeoutClient *marshaler.Duration `json:"timeout_client,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = UpdateFrontendRequest(tmp.tmpType) + m.TimeoutClient = tmp.TmpTimeoutClient.Standard() + return nil +} + +func (m UpdateFrontendRequest) MarshalJSON() ([]byte, error) { + type tmpType UpdateFrontendRequest + tmp := struct { + tmpType + TmpTimeoutClient *marshaler.Duration `json:"timeout_client,omitempty"` + }{ + tmpType: tmpType(m), + TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), + } + return json.Marshal(tmp) +} + +// UpdateHealthCheckRequest: update health check request. 
+type UpdateHealthCheckRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // Port: port to use for the backend server health check. + Port int32 `json:"port"` + + // CheckDelay: time to wait between two consecutive health checks. + CheckDelay *time.Duration `json:"check_delay,omitempty"` + + // CheckTimeout: maximum time a backend server has to reply to the health check. + CheckTimeout *time.Duration `json:"check_timeout,omitempty"` + + // CheckMaxRetries: number of consecutive unsuccessful health checks after which the server will be considered dead. + CheckMaxRetries int32 `json:"check_max_retries"` + + // CheckSendProxy: defines whether proxy protocol should be activated for the health check. + CheckSendProxy bool `json:"check_send_proxy"` + + // TCPConfig: object to configure a basic TCP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + TCPConfig *HealthCheckTCPConfig `json:"tcp_config,omitempty"` + + // MysqlConfig: object to configure a MySQL health check. The check requires MySQL >=3.22, for older versions, use a TCP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + MysqlConfig *HealthCheckMysqlConfig `json:"mysql_config,omitempty"` + + // PgsqlConfig: object to configure a PostgreSQL health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + PgsqlConfig *HealthCheckPgsqlConfig `json:"pgsql_config,omitempty"` + + // LdapConfig: object to configure an LDAP health check. The response is analyzed to find the LDAPv3 response message. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + LdapConfig *HealthCheckLdapConfig `json:"ldap_config,omitempty"` + + // RedisConfig: object to configure a Redis health check. The response is analyzed to find the +PONG response message. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + RedisConfig *HealthCheckRedisConfig `json:"redis_config,omitempty"` + + // HTTPConfig: object to configure an HTTP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + HTTPConfig *HealthCheckHTTPConfig `json:"http_config,omitempty"` + + // HTTPSConfig: object to configure an HTTPS health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + HTTPSConfig *HealthCheckHTTPSConfig `json:"https_config,omitempty"` + + // TransientCheckDelay: time to wait between two consecutive health checks when a backend server is in a transient state (going UP or DOWN). 
+ TransientCheckDelay *scw.Duration `json:"transient_check_delay,omitempty"` +} + +func (m *UpdateHealthCheckRequest) UnmarshalJSON(b []byte) error { + type tmpType UpdateHealthCheckRequest + tmp := struct { + tmpType + TmpCheckDelay *marshaler.Duration `json:"check_delay,omitempty"` + TmpCheckTimeout *marshaler.Duration `json:"check_timeout,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = UpdateHealthCheckRequest(tmp.tmpType) + m.CheckDelay = tmp.TmpCheckDelay.Standard() + m.CheckTimeout = tmp.TmpCheckTimeout.Standard() + return nil +} + +func (m UpdateHealthCheckRequest) MarshalJSON() ([]byte, error) { + type tmpType UpdateHealthCheckRequest + tmp := struct { + tmpType + TmpCheckDelay *marshaler.Duration `json:"check_delay,omitempty"` + TmpCheckTimeout *marshaler.Duration `json:"check_timeout,omitempty"` + }{ + tmpType: tmpType(m), + TmpCheckDelay: marshaler.NewDuration(m.CheckDelay), + TmpCheckTimeout: marshaler.NewDuration(m.CheckTimeout), + } + return json.Marshal(tmp) +} + +// UpdateIPRequest: update ip request. +type UpdateIPRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // IPID: IP address ID. + IPID string `json:"-"` + + // Reverse: reverse DNS (domain name) for the IP address. + Reverse *string `json:"reverse,omitempty"` +} + +// UpdateLBRequest: update lb request. +type UpdateLBRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: load Balancer name. + Name string `json:"name"` + + // Description: load Balancer description. + Description string `json:"description"` + + // Tags: list of tags for the Load Balancer. + Tags []string `json:"tags"` + + // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and don't need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort. + // Default value: ssl_compatibility_level_unknown + SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` +} + +// UpdateRouteRequest: update route request. +type UpdateRouteRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // RouteID: route ID. + RouteID string `json:"-"` + + // BackendID: ID of the target backend for the route. + BackendID string `json:"backend_id"` + + // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. + Match *RouteMatch `json:"match,omitempty"` +} + +// UpdateSubscriberRequest: update subscriber request. +type UpdateSubscriberRequest struct { + // Region: region to target. If none is passed will use default region from the config. + Region scw.Region `json:"-"` + + // SubscriberID: subscriber ID. + SubscriberID string `json:"-"` + // Name: subscriber name. Name string `json:"name"` - // EmailConfig: email address of subscriber. 
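Worth flagging on UpdateHealthCheckRequest a little further up: exactly one of the protocol-specific *Config members may be set, CheckDelay/CheckTimeout go through the same custom duration marshaling as the backend timeouts, and TransientCheckDelay uses scw.Duration directly. A minimal sketch under those assumptions, with HealthCheckHTTPConfig field names taken from the definitions replaced earlier in this diff and a placeholder backend ID; the request is only marshalled, not sent:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	delay := 5 * time.Second
	req := lb.UpdateHealthCheckRequest{
		BackendID:       "example-backend-id", // placeholder
		Port:            80,
		CheckMaxRetries: 3,
		CheckDelay:      &delay, // serialized via the custom MarshalJSON above
		// Exactly one of the protocol-specific configs may be set; HTTP here.
		HTTPConfig: &lb.HealthCheckHTTPConfig{
			URI:    "/healthz",
			Method: "GET",
		},
		TransientCheckDelay: &scw.Duration{Seconds: 1},
	}

	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}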
+ + // EmailConfig: email address configuration. // Precisely one of EmailConfig, WebhookConfig must be set. EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` - // WebhookConfig: webhook URI of subscriber. + + // WebhookConfig: webhook URI configuration. // Precisely one of EmailConfig, WebhookConfig must be set. WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` } -// SubscriberEmailConfig: subscriber. email config. -type SubscriberEmailConfig struct { - // Email: email address to send alerts to. - Email string `json:"email"` +// ZonedAPIAddBackendServersRequest: zoned api add backend servers request. +type ZonedAPIAddBackendServersRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // ServerIP: list of IP addresses to add to backend servers. + ServerIP []string `json:"server_ip"` } -// SubscriberWebhookConfig: webhook alert of subscriber. -// Subscriber. webhook config. -type SubscriberWebhookConfig struct { - // URI: URI to receive POST requests. - URI string `json:"uri"` +// ZonedAPIAttachPrivateNetworkRequest: zoned api attach private network request. +type ZonedAPIAttachPrivateNetworkRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // PrivateNetworkID: private Network ID. + PrivateNetworkID string `json:"-"` + + // Deprecated: StaticConfig: object containing an array of a local IP address for the Load Balancer on this Private Network. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + StaticConfig *PrivateNetworkStaticConfig `json:"static_config,omitempty"` + + // Deprecated: DHCPConfig: defines whether to let DHCP assign IP addresses. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + DHCPConfig *PrivateNetworkDHCPConfig `json:"dhcp_config,omitempty"` + + // Deprecated: IpamConfig: for internal use only. + // Precisely one of StaticConfig, DHCPConfig, IpamConfig must be set. + IpamConfig *PrivateNetworkIpamConfig `json:"ipam_config,omitempty"` } -// Service ZonedAPI +// ZonedAPICreateACLRequest: Add an ACL to a Load Balancer frontend. +type ZonedAPICreateACLRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` -// Zones list localities the api is available in -func (s *ZonedAPI) Zones() []scw.Zone { - return []scw.Zone{scw.ZoneFrPar1, scw.ZoneFrPar2, scw.ZoneNlAms1, scw.ZoneNlAms2, scw.ZoneNlAms3, scw.ZonePlWaw1, scw.ZonePlWaw2} + // FrontendID: frontend ID to attach the ACL to. + FrontendID string `json:"-"` + + // Name: ACL name. + Name string `json:"name"` + + // Action: action to take when incoming traffic matches an ACL filter. + Action *ACLAction `json:"action"` + + // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. + Match *ACLMatch `json:"match,omitempty"` + + // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). + Index int32 `json:"index"` + + // Description: ACL description. + Description string `json:"description"` } +// ZonedAPICreateBackendRequest: zoned api create backend request. +type ZonedAPICreateBackendRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. 
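On ZonedAPIAttachPrivateNetworkRequest above: all three members of the StaticConfig/DHCPConfig/IpamConfig one-of are now marked deprecated, but the diff still documents that precisely one must be set. A hedged sketch of attaching a Private Network through the zoned client, assuming lb.NewZonedAPI and an AttachPrivateNetwork method exist alongside the ZonedAPI type shown in this diff (credentials and IDs are placeholders):

package main

import (
	"fmt"

	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"), // placeholder credentials
		scw.WithDefaultZone(scw.ZoneFrPar1),
	)
	if err != nil {
		panic(err)
	}

	zapi := lb.NewZonedAPI(client)
	pn, err := zapi.AttachPrivateNetwork(&lb.ZonedAPIAttachPrivateNetworkRequest{
		LBID:             "example-lb-id",              // placeholder
		PrivateNetworkID: "example-private-network-id", // placeholder
		// An empty DHCP config satisfies the documented one-of constraint and
		// lets the network assign the address.
		DHCPConfig: &lb.PrivateNetworkDHCPConfig{},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("private network status:", pn.Status)
}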
+ Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: name for the backend. + Name string `json:"name"` + + // ForwardProtocol: protocol to be used by the backend when forwarding traffic to backend servers. + // Default value: tcp + ForwardProtocol Protocol `json:"forward_protocol"` + + // ForwardPort: port to be used by the backend when forwarding traffic to backend servers. + ForwardPort int32 `json:"forward_port"` + + // ForwardPortAlgorithm: load balancing algorithm to be used when determining which backend server to forward new traffic to. + // Default value: roundrobin + ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` + + // StickySessions: defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie TO stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. + // Default value: none + StickySessions StickySessionsType `json:"sticky_sessions"` + + // StickySessionsCookieName: cookie name for cookie-based sticky sessions. + StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` + + // HealthCheck: object defining the health check to be carried out by the backend when checking the status and health of backend servers. + HealthCheck *HealthCheck `json:"health_check"` + + // ServerIP: list of backend server IP addresses (IPv4 or IPv6) the backend should forward traffic to. + ServerIP []string `json:"server_ip"` + + // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. + SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` + + // TimeoutServer: maximum allowed time for a backend server to process a request. + TimeoutServer *time.Duration `json:"timeout_server,omitempty"` + + // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. + TimeoutConnect *time.Duration `json:"timeout_connect,omitempty"` + + // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). + TimeoutTunnel *time.Duration `json:"timeout_tunnel,omitempty"` + + // OnMarkedDownAction: action to take when a backend server is marked as down. + // Default value: on_marked_down_action_none + OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` + + // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. The PROXY protocol must be supported by the backend servers' software. + // Default value: proxy_protocol_unknown + ProxyProtocol ProxyProtocol `json:"proxy_protocol"` + + // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. + FailoverHost *string `json:"failover_host,omitempty"` + + // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. + SslBridging *bool `json:"ssl_bridging,omitempty"` + + // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. + IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify,omitempty"` + + // RedispatchAttemptCount: whether to use another backend server on each attempt. 
+ RedispatchAttemptCount *int32 `json:"redispatch_attempt_count,omitempty"` + + // MaxRetries: number of retries when a backend server connection failed. + MaxRetries *int32 `json:"max_retries,omitempty"` + + // MaxConnections: maximum number of connections allowed per backend server. + MaxConnections *int32 `json:"max_connections,omitempty"` + + // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. + TimeoutQueue *scw.Duration `json:"timeout_queue,omitempty"` +} + +func (m *ZonedAPICreateBackendRequest) UnmarshalJSON(b []byte) error { + type tmpType ZonedAPICreateBackendRequest + tmp := struct { + tmpType + TmpTimeoutServer *marshaler.Duration `json:"timeout_server,omitempty"` + TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect,omitempty"` + TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = ZonedAPICreateBackendRequest(tmp.tmpType) + m.TimeoutServer = tmp.TmpTimeoutServer.Standard() + m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() + m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() + return nil +} + +func (m ZonedAPICreateBackendRequest) MarshalJSON() ([]byte, error) { + type tmpType ZonedAPICreateBackendRequest + tmp := struct { + tmpType + TmpTimeoutServer *marshaler.Duration `json:"timeout_server,omitempty"` + TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect,omitempty"` + TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel,omitempty"` + }{ + tmpType: tmpType(m), + TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), + TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), + TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), + } + return json.Marshal(tmp) +} + +// ZonedAPICreateCertificateRequest: zoned api create certificate request. +type ZonedAPICreateCertificateRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: name for the certificate. + Name string `json:"name"` + + // Letsencrypt: object to define a new Let's Encrypt certificate to be generated. + // Precisely one of Letsencrypt, CustomCertificate must be set. + Letsencrypt *CreateCertificateRequestLetsencryptConfig `json:"letsencrypt,omitempty"` + + // CustomCertificate: object to define an existing custom certificate to be imported. + // Precisely one of Letsencrypt, CustomCertificate must be set. + CustomCertificate *CreateCertificateRequestCustomCertificate `json:"custom_certificate,omitempty"` +} + +// ZonedAPICreateFrontendRequest: zoned api create frontend request. +type ZonedAPICreateFrontendRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID (ID of the Load Balancer to attach the frontend to). + LBID string `json:"-"` + + // Name: name for the frontend. + Name string `json:"name"` + + // InboundPort: port the frontend should listen on. + InboundPort int32 `json:"inbound_port"` + + // BackendID: backend ID (ID of the backend the frontend should pass traffic to). + BackendID string `json:"backend_id"` + + // TimeoutClient: maximum allowed inactivity time on the client side. + TimeoutClient *time.Duration `json:"timeout_client,omitempty"` + + // Deprecated: CertificateID: certificate ID, deprecated in favor of certificate_ids array. 
+ CertificateID *string `json:"certificate_id,omitempty"` + + // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. + CertificateIDs *[]string `json:"certificate_ids,omitempty"` + + // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. + EnableHTTP3 bool `json:"enable_http3"` +} + +func (m *ZonedAPICreateFrontendRequest) UnmarshalJSON(b []byte) error { + type tmpType ZonedAPICreateFrontendRequest + tmp := struct { + tmpType + TmpTimeoutClient *marshaler.Duration `json:"timeout_client,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = ZonedAPICreateFrontendRequest(tmp.tmpType) + m.TimeoutClient = tmp.TmpTimeoutClient.Standard() + return nil +} + +func (m ZonedAPICreateFrontendRequest) MarshalJSON() ([]byte, error) { + type tmpType ZonedAPICreateFrontendRequest + tmp := struct { + tmpType + TmpTimeoutClient *marshaler.Duration `json:"timeout_client,omitempty"` + }{ + tmpType: tmpType(m), + TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), + } + return json.Marshal(tmp) +} + +// ZonedAPICreateIPRequest: zoned api create ip request. +type ZonedAPICreateIPRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Deprecated: OrganizationID: organization ID of the Organization where the IP address should be created. + // Precisely one of ProjectID, OrganizationID must be set. + OrganizationID *string `json:"organization_id,omitempty"` + + // ProjectID: project ID of the Project where the IP address should be created. + // Precisely one of ProjectID, OrganizationID must be set. + ProjectID *string `json:"project_id,omitempty"` + + // Reverse: reverse DNS (domain name) for the IP address. + Reverse *string `json:"reverse,omitempty"` +} + +// ZonedAPICreateLBRequest: zoned api create lb request. +type ZonedAPICreateLBRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Deprecated: OrganizationID: scaleway Organization to create the Load Balancer in. + // Precisely one of ProjectID, OrganizationID must be set. + OrganizationID *string `json:"organization_id,omitempty"` + + // ProjectID: scaleway Project to create the Load Balancer in. + // Precisely one of ProjectID, OrganizationID must be set. + ProjectID *string `json:"project_id,omitempty"` + + // Name: name for the Load Balancer. + Name string `json:"name"` + + // Description: description for the Load Balancer. + Description string `json:"description"` + + // Deprecated: IPID: ID of an existing flexible IP address to attach to the Load Balancer. + IPID *string `json:"ip_id,omitempty"` + + // AssignFlexibleIP: defines whether to automatically assign a flexible public IP to lb. Default value is `false` (do not assign). + AssignFlexibleIP *bool `json:"assign_flexible_ip,omitempty"` + + // Tags: list of tags for the Load Balancer. + Tags []string `json:"tags"` + + // Type: load Balancer commercial offer type. Use the Load Balancer types endpoint to retrieve a list of available offer types. + Type string `json:"type"` + + // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and do not need backward compatibility. 
Old is compatible with a small number of very old clients and should be used only as a last resort. + // Default value: ssl_compatibility_level_unknown + SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` +} + +// ZonedAPICreateRouteRequest: zoned api create route request. +type ZonedAPICreateRouteRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // FrontendID: ID of the source frontend to create the route on. + FrontendID string `json:"frontend_id"` + + // BackendID: ID of the target backend for the route. + BackendID string `json:"backend_id"` + + // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. + Match *RouteMatch `json:"match,omitempty"` +} + +// ZonedAPICreateSubscriberRequest: Create a new alert subscriber (webhook or email). +type ZonedAPICreateSubscriberRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Name: subscriber name. + Name string `json:"name"` + + // EmailConfig: email address configuration. + // Precisely one of EmailConfig, WebhookConfig must be set. + EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` + + // WebhookConfig: webHook URI configuration. + // Precisely one of EmailConfig, WebhookConfig must be set. + WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` + + // Deprecated: OrganizationID: organization ID to create the subscriber in. + // Precisely one of ProjectID, OrganizationID must be set. + OrganizationID *string `json:"organization_id,omitempty"` + + // ProjectID: project ID to create the subscriber in. + // Precisely one of ProjectID, OrganizationID must be set. + ProjectID *string `json:"project_id,omitempty"` +} + +// ZonedAPIDeleteACLRequest: zoned api delete acl request. +type ZonedAPIDeleteACLRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ACLID: ACL ID. + ACLID string `json:"-"` +} + +// ZonedAPIDeleteBackendRequest: zoned api delete backend request. +type ZonedAPIDeleteBackendRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // BackendID: ID of the backend to delete. + BackendID string `json:"-"` +} + +// ZonedAPIDeleteCertificateRequest: zoned api delete certificate request. +type ZonedAPIDeleteCertificateRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // CertificateID: certificate ID. + CertificateID string `json:"-"` +} + +// ZonedAPIDeleteFrontendRequest: zoned api delete frontend request. +type ZonedAPIDeleteFrontendRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // FrontendID: ID of the frontend to delete. + FrontendID string `json:"-"` +} + +// ZonedAPIDeleteLBRequest: zoned api delete lb request. +type ZonedAPIDeleteLBRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: ID of the Load Balancer to delete. + LBID string `json:"-"` + + // ReleaseIP: defines whether the Load Balancer's flexible IP should be deleted. 
Set to true to release the flexible IP, or false to keep it available in your account for future Load Balancers. + ReleaseIP bool `json:"release_ip"` +} + +// ZonedAPIDeleteRouteRequest: zoned api delete route request. +type ZonedAPIDeleteRouteRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // RouteID: route ID. + RouteID string `json:"-"` +} + +// ZonedAPIDeleteSubscriberRequest: zoned api delete subscriber request. +type ZonedAPIDeleteSubscriberRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SubscriberID: subscriber ID. + SubscriberID string `json:"-"` +} + +// ZonedAPIDetachPrivateNetworkRequest: zoned api detach private network request. +type ZonedAPIDetachPrivateNetworkRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load balancer ID. + LBID string `json:"-"` + + // PrivateNetworkID: set your instance private network id. + PrivateNetworkID string `json:"-"` +} + +// ZonedAPIGetACLRequest: zoned api get acl request. +type ZonedAPIGetACLRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ACLID: ACL ID. + ACLID string `json:"-"` +} + +// ZonedAPIGetBackendRequest: zoned api get backend request. +type ZonedAPIGetBackendRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` +} + +// ZonedAPIGetCertificateRequest: zoned api get certificate request. +type ZonedAPIGetCertificateRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // CertificateID: certificate ID. + CertificateID string `json:"-"` +} + +// ZonedAPIGetFrontendRequest: zoned api get frontend request. +type ZonedAPIGetFrontendRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // FrontendID: frontend ID. + FrontendID string `json:"-"` +} + +// ZonedAPIGetIPRequest: zoned api get ip request. +type ZonedAPIGetIPRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // IPID: IP address ID. + IPID string `json:"-"` +} + +// ZonedAPIGetLBRequest: zoned api get lb request. +type ZonedAPIGetLBRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` +} + +// ZonedAPIGetLBStatsRequest: Get Load Balancer stats. +type ZonedAPIGetLBStatsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // BackendID: ID of the backend. + BackendID *string `json:"backend_id,omitempty"` +} + +// ZonedAPIGetRouteRequest: zoned api get route request. +type ZonedAPIGetRouteRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // RouteID: route ID. + RouteID string `json:"-"` +} + +// ZonedAPIGetSubscriberRequest: zoned api get subscriber request. +type ZonedAPIGetSubscriberRequest struct { + // Zone: zone to target. 
If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SubscriberID: subscriber ID. + SubscriberID string `json:"-"` +} + +// ZonedAPIListACLsRequest: zoned api list ac ls request. +type ZonedAPIListACLsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // FrontendID: frontend ID (ACLs attached to this frontend will be returned in the response). + FrontendID string `json:"-"` + + // OrderBy: sort order of ACLs in the response. + // Default value: created_at_asc + OrderBy ListACLRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: the number of ACLs to return. + PageSize *uint32 `json:"-"` + + // Name: ACL name to filter for. + Name *string `json:"-"` +} + +// ZonedAPIListBackendStatsRequest: zoned api list backend stats request. +type ZonedAPIListBackendStatsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of items to return. + PageSize *uint32 `json:"-"` + + // BackendID: ID of the backend. + BackendID *string `json:"-"` +} + +// ZonedAPIListBackendsRequest: zoned api list backends request. +type ZonedAPIListBackendsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: name of the backend to filter for. + Name *string `json:"-"` + + // OrderBy: sort order of backends in the response. + // Default value: created_at_asc + OrderBy ListBackendsRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of backends to return. + PageSize *uint32 `json:"-"` +} + +// ZonedAPIListCertificatesRequest: zoned api list certificates request. +type ZonedAPIListCertificatesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // OrderBy: sort order of certificates in the response. + // Default value: created_at_asc + OrderBy ListCertificatesRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of certificates to return. + PageSize *uint32 `json:"-"` + + // Name: certificate name to filter for, only certificates of this name will be returned. + Name *string `json:"-"` +} + +// ZonedAPIListFrontendsRequest: zoned api list frontends request. +type ZonedAPIListFrontendsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: name of the frontend to filter for. + Name *string `json:"-"` + + // OrderBy: sort order of frontends in the response. + // Default value: created_at_asc + OrderBy ListFrontendsRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of frontends to return. + PageSize *uint32 `json:"-"` +} + +// ZonedAPIListIPsRequest: zoned api list i ps request. 
+type ZonedAPIListIPsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: number of IP addresses to return. + PageSize *uint32 `json:"-"` + + // IPAddress: IP address to filter for. + IPAddress *string `json:"-"` + + // OrganizationID: organization ID to filter for, only Load Balancer IP addresses from this Organization will be returned. + OrganizationID *string `json:"-"` + + // ProjectID: project ID to filter for, only Load Balancer IP addresses from this Project will be returned. + ProjectID *string `json:"-"` +} + +// ZonedAPIListLBPrivateNetworksRequest: zoned api list lb private networks request. +type ZonedAPIListLBPrivateNetworksRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // OrderBy: sort order of Private Network objects in the response. + // Default value: created_at_asc + OrderBy ListPrivateNetworksRequestOrderBy `json:"-"` + + // PageSize: number of objects to return. + PageSize *uint32 `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` +} + +// ZonedAPIListLBTypesRequest: zoned api list lb types request. +type ZonedAPIListLBTypesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: the number of items to return. + PageSize *uint32 `json:"-"` +} + +// ZonedAPIListLBsRequest: zoned api list l bs request. type ZonedAPIListLBsRequest struct { // Zone: zone to target. If none is passed will use default zone from the config. Zone scw.Zone `json:"-"` + // Name: load Balancer name to filter for. Name *string `json:"-"` + // OrderBy: sort order of Load Balancers in the response. // Default value: created_at_asc OrderBy ListLBsRequestOrderBy `json:"-"` + // PageSize: number of Load Balancers to return. PageSize *uint32 `json:"-"` + // Page: page number to return, from the paginated results. Page *int32 `json:"-"` + // OrganizationID: organization ID to filter for, only Load Balancers from this Organization will be returned. OrganizationID *string `json:"-"` + // ProjectID: project ID to filter for, only Load Balancers from this Project will be returned. ProjectID *string `json:"-"` } -// ListLBs: list Load Balancers. -// List all Load Balancers in the specified zone, for a Scaleway Organization or Scaleway Project. By default, the Load Balancers returned in the list are ordered by creation date in ascending order, though this can be modified via the `order_by` field. +// ZonedAPIListRoutesRequest: zoned api list routes request. +type ZonedAPIListRoutesRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // OrderBy: sort order of routes in the response. + // Default value: created_at_asc + OrderBy ListRoutesRequestOrderBy `json:"-"` + + // PageSize: the number of route objects to return. + PageSize *uint32 `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // FrontendID: frontend ID to filter for, only Routes from this Frontend will be returned. 
+ FrontendID *string `json:"-"` +} + +// ZonedAPIListSubscriberRequest: zoned api list subscriber request. +type ZonedAPIListSubscriberRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // OrderBy: sort order of subscribers in the response. + // Default value: created_at_asc + OrderBy ListSubscriberRequestOrderBy `json:"-"` + + // Page: the page number to return, from the paginated results. + Page *int32 `json:"-"` + + // PageSize: the number of items to return. + PageSize *uint32 `json:"-"` + + // Name: subscriber name to search for. + Name *string `json:"-"` + + // OrganizationID: filter subscribers by Organization ID. + OrganizationID *string `json:"-"` + + // ProjectID: filter subscribers by Project ID. + ProjectID *string `json:"-"` +} + +// ZonedAPIMigrateLBRequest: zoned api migrate lb request. +type ZonedAPIMigrateLBRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Type: load Balancer type to migrate to (use the List all Load Balancer offer types endpoint to get a list of available offer types). + Type string `json:"type"` +} + +// ZonedAPIReleaseIPRequest: zoned api release ip request. +type ZonedAPIReleaseIPRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // IPID: IP address ID. + IPID string `json:"-"` +} + +// ZonedAPIRemoveBackendServersRequest: zoned api remove backend servers request. +type ZonedAPIRemoveBackendServersRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // ServerIP: list of IP addresses to remove from backend servers. + ServerIP []string `json:"server_ip"` +} + +// ZonedAPISetACLsRequest: zoned api set ac ls request. +type ZonedAPISetACLsRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // FrontendID: frontend ID. + FrontendID string `json:"-"` + + // ACLs: list of ACLs for this frontend. Any other existing ACLs on this frontend will be removed. + ACLs []*ACLSpec `json:"acls"` +} + +// ZonedAPISetBackendServersRequest: zoned api set backend servers request. +type ZonedAPISetBackendServersRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // ServerIP: list of IP addresses for backend servers. Any other existing backend servers will be removed. + ServerIP []string `json:"server_ip"` +} + +// ZonedAPISubscribeToLBRequest: zoned api subscribe to lb request. +type ZonedAPISubscribeToLBRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // SubscriberID: subscriber ID. + SubscriberID string `json:"subscriber_id"` +} + +// ZonedAPIUnsubscribeFromLBRequest: zoned api unsubscribe from lb request. +type ZonedAPIUnsubscribeFromLBRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` +} + +// ZonedAPIUpdateACLRequest: zoned api update acl request. 
+type ZonedAPIUpdateACLRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // ACLID: ACL ID. + ACLID string `json:"-"` + + // Name: ACL name. + Name string `json:"name"` + + // Action: action to take when incoming traffic matches an ACL filter. + Action *ACLAction `json:"action"` + + // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. + Match *ACLMatch `json:"match,omitempty"` + + // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). + Index int32 `json:"index"` + + // Description: ACL description. + Description *string `json:"description,omitempty"` +} + +// ZonedAPIUpdateBackendRequest: zoned api update backend request. +type ZonedAPIUpdateBackendRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // Name: backend name. + Name string `json:"name"` + + // ForwardProtocol: protocol to be used by the backend when forwarding traffic to backend servers. + // Default value: tcp + ForwardProtocol Protocol `json:"forward_protocol"` + + // ForwardPort: port to be used by the backend when forwarding traffic to backend servers. + ForwardPort int32 `json:"forward_port"` + + // ForwardPortAlgorithm: load balancing algorithm to be used when determining which backend server to forward new traffic to. + // Default value: roundrobin + ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` + + // StickySessions: defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie to stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. + // Default value: none + StickySessions StickySessionsType `json:"sticky_sessions"` + + // StickySessionsCookieName: cookie name for cookie-based sticky sessions. + StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` + + // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. + SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` + + // TimeoutServer: maximum allowed time for a backend server to process a request. + TimeoutServer *time.Duration `json:"timeout_server,omitempty"` + + // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. + TimeoutConnect *time.Duration `json:"timeout_connect,omitempty"` + + // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). + TimeoutTunnel *time.Duration `json:"timeout_tunnel,omitempty"` + + // OnMarkedDownAction: action to take when a backend server is marked as down. + // Default value: on_marked_down_action_none + OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` + + // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. The PROXY protocol must be supported by the backend servers' software. + // Default value: proxy_protocol_unknown + ProxyProtocol ProxyProtocol `json:"proxy_protocol"` + + // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. 
failover-website.s3-website.fr-par.scw.cloud. + FailoverHost *string `json:"failover_host,omitempty"` + + // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. + SslBridging *bool `json:"ssl_bridging,omitempty"` + + // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. + IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify,omitempty"` + + // RedispatchAttemptCount: whether to use another backend server on each attempt. + RedispatchAttemptCount *int32 `json:"redispatch_attempt_count,omitempty"` + + // MaxRetries: number of retries when a backend server connection failed. + MaxRetries *int32 `json:"max_retries,omitempty"` + + // MaxConnections: maximum number of connections allowed per backend server. + MaxConnections *int32 `json:"max_connections,omitempty"` + + // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. + TimeoutQueue *scw.Duration `json:"timeout_queue,omitempty"` +} + +func (m *ZonedAPIUpdateBackendRequest) UnmarshalJSON(b []byte) error { + type tmpType ZonedAPIUpdateBackendRequest + tmp := struct { + tmpType + TmpTimeoutServer *marshaler.Duration `json:"timeout_server,omitempty"` + TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect,omitempty"` + TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = ZonedAPIUpdateBackendRequest(tmp.tmpType) + m.TimeoutServer = tmp.TmpTimeoutServer.Standard() + m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() + m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() + return nil +} + +func (m ZonedAPIUpdateBackendRequest) MarshalJSON() ([]byte, error) { + type tmpType ZonedAPIUpdateBackendRequest + tmp := struct { + tmpType + TmpTimeoutServer *marshaler.Duration `json:"timeout_server,omitempty"` + TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect,omitempty"` + TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel,omitempty"` + }{ + tmpType: tmpType(m), + TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), + TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), + TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), + } + return json.Marshal(tmp) +} + +// ZonedAPIUpdateCertificateRequest: zoned api update certificate request. +type ZonedAPIUpdateCertificateRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // CertificateID: certificate ID. + CertificateID string `json:"-"` + + // Name: certificate name. + Name string `json:"name"` +} + +// ZonedAPIUpdateFrontendRequest: zoned api update frontend request. +type ZonedAPIUpdateFrontendRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // FrontendID: frontend ID. + FrontendID string `json:"-"` + + // Name: frontend name. + Name string `json:"name"` + + // InboundPort: port the frontend should listen on. + InboundPort int32 `json:"inbound_port"` + + // BackendID: backend ID (ID of the backend the frontend should pass traffic to). + BackendID string `json:"backend_id"` + + // TimeoutClient: maximum allowed inactivity time on the client side. + TimeoutClient *time.Duration `json:"timeout_client,omitempty"` + + // Deprecated: CertificateID: certificate ID, deprecated in favor of certificate_ids array. 
+ CertificateID *string `json:"certificate_id,omitempty"` + + // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. + CertificateIDs *[]string `json:"certificate_ids,omitempty"` + + // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. + EnableHTTP3 bool `json:"enable_http3"` +} + +func (m *ZonedAPIUpdateFrontendRequest) UnmarshalJSON(b []byte) error { + type tmpType ZonedAPIUpdateFrontendRequest + tmp := struct { + tmpType + TmpTimeoutClient *marshaler.Duration `json:"timeout_client,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = ZonedAPIUpdateFrontendRequest(tmp.tmpType) + m.TimeoutClient = tmp.TmpTimeoutClient.Standard() + return nil +} + +func (m ZonedAPIUpdateFrontendRequest) MarshalJSON() ([]byte, error) { + type tmpType ZonedAPIUpdateFrontendRequest + tmp := struct { + tmpType + TmpTimeoutClient *marshaler.Duration `json:"timeout_client,omitempty"` + }{ + tmpType: tmpType(m), + TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), + } + return json.Marshal(tmp) +} + +// ZonedAPIUpdateHealthCheckRequest: zoned api update health check request. +type ZonedAPIUpdateHealthCheckRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // BackendID: backend ID. + BackendID string `json:"-"` + + // Port: port to use for the backend server health check. + Port int32 `json:"port"` + + // CheckDelay: time to wait between two consecutive health checks. + CheckDelay *time.Duration `json:"check_delay,omitempty"` + + // CheckTimeout: maximum time a backend server has to reply to the health check. + CheckTimeout *time.Duration `json:"check_timeout,omitempty"` + + // CheckMaxRetries: number of consecutive unsuccessful health checks after which the server will be considered dead. + CheckMaxRetries int32 `json:"check_max_retries"` + + // CheckSendProxy: defines whether proxy protocol should be activated for the health check. + CheckSendProxy bool `json:"check_send_proxy"` + + // TCPConfig: object to configure a basic TCP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + TCPConfig *HealthCheckTCPConfig `json:"tcp_config,omitempty"` + + // MysqlConfig: object to configure a MySQL health check. The check requires MySQL >=3.22, for older versions, use a TCP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + MysqlConfig *HealthCheckMysqlConfig `json:"mysql_config,omitempty"` + + // PgsqlConfig: object to configure a PostgreSQL health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + PgsqlConfig *HealthCheckPgsqlConfig `json:"pgsql_config,omitempty"` + + // LdapConfig: object to configure an LDAP health check. The response is analyzed to find the LDAPv3 response message. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + LdapConfig *HealthCheckLdapConfig `json:"ldap_config,omitempty"` + + // RedisConfig: object to configure a Redis health check. The response is analyzed to find the +PONG response message. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. 
+ RedisConfig *HealthCheckRedisConfig `json:"redis_config,omitempty"` + + // HTTPConfig: object to configure an HTTP health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + HTTPConfig *HealthCheckHTTPConfig `json:"http_config,omitempty"` + + // HTTPSConfig: object to configure an HTTPS health check. + // Precisely one of TCPConfig, MysqlConfig, PgsqlConfig, LdapConfig, RedisConfig, HTTPConfig, HTTPSConfig must be set. + HTTPSConfig *HealthCheckHTTPSConfig `json:"https_config,omitempty"` + + // TransientCheckDelay: time to wait between two consecutive health checks when a backend server is in a transient state (going UP or DOWN). + TransientCheckDelay *scw.Duration `json:"transient_check_delay,omitempty"` +} + +func (m *ZonedAPIUpdateHealthCheckRequest) UnmarshalJSON(b []byte) error { + type tmpType ZonedAPIUpdateHealthCheckRequest + tmp := struct { + tmpType + TmpCheckDelay *marshaler.Duration `json:"check_delay,omitempty"` + TmpCheckTimeout *marshaler.Duration `json:"check_timeout,omitempty"` + }{} + err := json.Unmarshal(b, &tmp) + if err != nil { + return err + } + + *m = ZonedAPIUpdateHealthCheckRequest(tmp.tmpType) + m.CheckDelay = tmp.TmpCheckDelay.Standard() + m.CheckTimeout = tmp.TmpCheckTimeout.Standard() + return nil +} + +func (m ZonedAPIUpdateHealthCheckRequest) MarshalJSON() ([]byte, error) { + type tmpType ZonedAPIUpdateHealthCheckRequest + tmp := struct { + tmpType + TmpCheckDelay *marshaler.Duration `json:"check_delay,omitempty"` + TmpCheckTimeout *marshaler.Duration `json:"check_timeout,omitempty"` + }{ + tmpType: tmpType(m), + TmpCheckDelay: marshaler.NewDuration(m.CheckDelay), + TmpCheckTimeout: marshaler.NewDuration(m.CheckTimeout), + } + return json.Marshal(tmp) +} + +// ZonedAPIUpdateIPRequest: zoned api update ip request. +type ZonedAPIUpdateIPRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // IPID: IP address ID. + IPID string `json:"-"` + + // Reverse: reverse DNS (domain name) for the IP address. + Reverse *string `json:"reverse,omitempty"` +} + +// ZonedAPIUpdateLBRequest: zoned api update lb request. +type ZonedAPIUpdateLBRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // LBID: load Balancer ID. + LBID string `json:"-"` + + // Name: load Balancer name. + Name string `json:"name"` + + // Description: load Balancer description. + Description string `json:"description"` + + // Tags: list of tags for the Load Balancer. + Tags []string `json:"tags"` + + // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and don't need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort. + // Default value: ssl_compatibility_level_unknown + SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` +} + +// ZonedAPIUpdateRouteRequest: zoned api update route request. +type ZonedAPIUpdateRouteRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // RouteID: route ID. 
+ RouteID string `json:"-"` + + // BackendID: ID of the target backend for the route. + BackendID string `json:"backend_id"` + + // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. + Match *RouteMatch `json:"match,omitempty"` +} + +// ZonedAPIUpdateSubscriberRequest: zoned api update subscriber request. +type ZonedAPIUpdateSubscriberRequest struct { + // Zone: zone to target. If none is passed will use default zone from the config. + Zone scw.Zone `json:"-"` + + // SubscriberID: subscriber ID. + SubscriberID string `json:"-"` + + // Name: subscriber name. + Name string `json:"name"` + + // EmailConfig: email address configuration. + // Precisely one of EmailConfig, WebhookConfig must be set. + EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` + + // WebhookConfig: webhook URI configuration. + // Precisely one of EmailConfig, WebhookConfig must be set. + WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` +} + +// This API allows you to manage your Scaleway Load Balancer services. +type ZonedAPI struct { + client *scw.Client +} + +// NewZonedAPI returns a ZonedAPI object from a Scaleway client. +func NewZonedAPI(client *scw.Client) *ZonedAPI { + return &ZonedAPI{ + client: client, + } +} +func (s *ZonedAPI) Zones() []scw.Zone { + return []scw.Zone{scw.ZoneFrPar1, scw.ZoneFrPar2, scw.ZoneNlAms1, scw.ZoneNlAms2, scw.ZoneNlAms3, scw.ZonePlWaw1, scw.ZonePlWaw2, scw.ZonePlWaw3} +} + +// ListLBs: List all Load Balancers in the specified zone, for a Scaleway Organization or Scaleway Project. By default, the Load Balancers returned in the list are ordered by creation date in ascending order, though this can be modified via the `order_by` field. func (s *ZonedAPI) ListLBs(req *ZonedAPIListLBsRequest, opts ...scw.RequestOption) (*ListLBsResponse, error) { var err error @@ -1639,10 +4285,9 @@ func (s *ZonedAPI) ListLBs(req *ZonedAPIListLBsRequest, opts ...scw.RequestOptio } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs", + Query: query, } var resp ListLBsResponse @@ -1654,52 +4299,25 @@ func (s *ZonedAPI) ListLBs(req *ZonedAPIListLBsRequest, opts ...scw.RequestOptio return &resp, nil } -type ZonedAPICreateLBRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Deprecated: OrganizationID: scaleway Organization to create the Load Balancer in. - // Precisely one of OrganizationID, ProjectID must be set. - OrganizationID *string `json:"organization_id,omitempty"` - // ProjectID: scaleway Project to create the Load Balancer in. - // Precisely one of OrganizationID, ProjectID must be set. - ProjectID *string `json:"project_id,omitempty"` - // Name: name for the Load Balancer. - Name string `json:"name"` - // Description: description for the Load Balancer. - Description string `json:"description"` - // Deprecated: IPID: ID of an existing flexible IP address to attach to the Load Balancer. - IPID *string `json:"ip_id,omitempty"` - // AssignFlexibleIP: defines whether to automatically assign a flexible public IP to lb. Default value is `false` (do not assign). 
- AssignFlexibleIP *bool `json:"assign_flexible_ip"` - // Tags: list of tags for the Load Balancer. - Tags []string `json:"tags"` - // Type: load Balancer commercial offer type. Use the Load Balancer types endpoint to retrieve a list of available offer types. - Type string `json:"type"` - // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and do not need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort. - // Default value: ssl_compatibility_level_unknown - SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` -} - -// CreateLB: create a Load Balancer. -// Create a new Load Balancer. Note that the Load Balancer will be created without frontends or backends; these must be created separately via the dedicated endpoints. +// CreateLB: Create a new Load Balancer. Note that the Load Balancer will be created without frontends or backends; these must be created separately via the dedicated endpoints. func (s *ZonedAPI) CreateLB(req *ZonedAPICreateLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error - defaultProjectID, exist := s.client.GetDefaultProjectID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.ProjectID = &defaultProjectID - } - - defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.OrganizationID = &defaultOrganizationID - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProjectID, exist := s.client.GetDefaultProjectID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.ProjectID = &defaultProjectID + } + + defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.OrganizationID = &defaultOrganizationID + } + if req.Name == "" { req.Name = namegenerator.GetRandomName("lb") } @@ -1709,9 +4327,8 @@ func (s *ZonedAPI) CreateLB(req *ZonedAPICreateLBRequest, opts ...scw.RequestOpt } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs", } err = scwReq.SetBody(req) @@ -1728,15 +4345,7 @@ func (s *ZonedAPI) CreateLB(req *ZonedAPICreateLBRequest, opts ...scw.RequestOpt return &resp, nil } -type ZonedAPIGetLBRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` -} - -// GetLB: get a Load Balancer. -// Retrieve information about an existing Load Balancer, specified by its Load Balancer ID. Its full details, including name, status and IP address, are returned in the response object. +// GetLB: Retrieve information about an existing Load Balancer, specified by its Load Balancer ID. Its full details, including name, status and IP address, are returned in the response object. 
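As a usage sketch (illustrative only, not part of the vendored file), the new zoned CreateLB flow above can be exercised roughly as follows. The lb/v1 identifiers (NewZonedAPI, ZonedAPICreateLBRequest and its fields) come from this diff; the scw client options (scw.NewClient, WithAuth, WithDefaultProjectID, WithDefaultZone, BoolPtr) are the sdk's standard scw-package helpers, and the credentials, project ID and "LB-S" offer type are placeholders.

package main

import (
	"log"

	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// Build a client; credentials, project ID and default zone are placeholders.
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"),
		scw.WithDefaultProjectID("11111111-1111-1111-1111-111111111111"),
		scw.WithDefaultZone(scw.ZoneFrPar1),
	)
	if err != nil {
		log.Fatal(err)
	}

	api := lb.NewZonedAPI(client)

	// Create a Load Balancer; frontends and backends are added separately.
	newLB, err := api.CreateLB(&lb.ZonedAPICreateLBRequest{
		Zone:             scw.ZoneFrPar1,
		Name:             "example-lb",
		Type:             "LB-S", // placeholder offer type, see ListLBTypes
		AssignFlexibleIP: scw.BoolPtr(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created Load Balancer %s", newLB.ID)
}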
func (s *ZonedAPI) GetLB(req *ZonedAPIGetLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -1754,9 +4363,8 @@ func (s *ZonedAPI) GetLB(req *ZonedAPIGetLBRequest, opts ...scw.RequestOption) ( } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "", } var resp LB @@ -1768,24 +4376,7 @@ func (s *ZonedAPI) GetLB(req *ZonedAPIGetLBRequest, opts ...scw.RequestOption) ( return &resp, nil } -type ZonedAPIUpdateLBRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: load Balancer name. - Name string `json:"name"` - // Description: load Balancer description. - Description string `json:"description"` - // Tags: list of tags for the Load Balancer. - Tags []string `json:"tags"` - // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and don't need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort. - // Default value: ssl_compatibility_level_unknown - SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` -} - -// UpdateLB: update a Load Balancer. -// Update the parameters of an existing Load Balancer, specified by its Load Balancer ID. Note that the request type is PUT and not PATCH. You must set all parameters. +// UpdateLB: Update the parameters of an existing Load Balancer, specified by its Load Balancer ID. Note that the request type is PUT and not PATCH. You must set all parameters. func (s *ZonedAPI) UpdateLB(req *ZonedAPIUpdateLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -1803,9 +4394,8 @@ func (s *ZonedAPI) UpdateLB(req *ZonedAPIUpdateLBRequest, opts ...scw.RequestOpt } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "", } err = scwReq.SetBody(req) @@ -1822,17 +4412,7 @@ func (s *ZonedAPI) UpdateLB(req *ZonedAPIUpdateLBRequest, opts ...scw.RequestOpt return &resp, nil } -type ZonedAPIDeleteLBRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: ID of the Load Balancer to delete. - LBID string `json:"-"` - // ReleaseIP: defines whether the Load Balancer's flexible IP should be deleted. Set to true to release the flexible IP, or false to keep it available in your account for future Load Balancers. - ReleaseIP bool `json:"-"` -} - -// DeleteLB: delete a Load Balancer. -// Delete an existing Load Balancer, specified by its Load Balancer ID. Deleting a Load Balancer is permanent, and cannot be undone. The Load Balancer's flexible IP address can either be deleted with the Load Balancer, or kept in your account for future use. +// DeleteLB: Delete an existing Load Balancer, specified by its Load Balancer ID. 
Deleting a Load Balancer is permanent, and cannot be undone. The Load Balancer's flexible IP address can either be deleted with the Load Balancer, or kept in your account for future use. func (s *ZonedAPI) DeleteLB(req *ZonedAPIDeleteLBRequest, opts ...scw.RequestOption) error { var err error @@ -1853,10 +4433,9 @@ func (s *ZonedAPI) DeleteLB(req *ZonedAPIDeleteLBRequest, opts ...scw.RequestOpt } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "", - Query: query, - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "", + Query: query, } err = s.client.Do(scwReq, nil, opts...) @@ -1866,17 +4445,7 @@ func (s *ZonedAPI) DeleteLB(req *ZonedAPIDeleteLBRequest, opts ...scw.RequestOpt return nil } -type ZonedAPIMigrateLBRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Type: load Balancer type to migrate to (use the List all Load Balancer offer types endpoint to get a list of available offer types). - Type string `json:"type"` -} - -// MigrateLB: migrate a Load Balancer. -// Migrate an existing Load Balancer from one commercial type to another. Allows you to scale your Load Balancer up or down in terms of bandwidth or multi-cloud provision. +// MigrateLB: Migrate an existing Load Balancer from one commercial type to another. Allows you to scale your Load Balancer up or down in terms of bandwidth or multi-cloud provision. func (s *ZonedAPI) MigrateLB(req *ZonedAPIMigrateLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -1894,9 +4463,8 @@ func (s *ZonedAPI) MigrateLB(req *ZonedAPIMigrateLBRequest, opts ...scw.RequestO } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/migrate", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/migrate", } err = scwReq.SetBody(req) @@ -1913,23 +4481,7 @@ func (s *ZonedAPI) MigrateLB(req *ZonedAPIMigrateLBRequest, opts ...scw.RequestO return &resp, nil } -type ZonedAPIListIPsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of IP addresses to return. - PageSize *uint32 `json:"-"` - // IPAddress: IP address to filter for. - IPAddress *string `json:"-"` - // OrganizationID: organization ID to filter for, only Load Balancer IP addresses from this Organization will be returned. - OrganizationID *string `json:"-"` - // ProjectID: project ID to filter for, only Load Balancer IP addresses from this Project will be returned. - ProjectID *string `json:"-"` -} - -// ListIPs: list IP addresses. -// List the Load Balancer flexible IP addresses held in the account (filtered by Organization ID or Project ID). It is also possible to search for a specific IP address. +// ListIPs: List the Load Balancer flexible IP addresses held in the account (filtered by Organization ID or Project ID). It is also possible to search for a specific IP address. 
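Continuing the same illustrative sketch (again, not vendored content): the ReleaseIP flag described in the DeleteLB documentation above is what decides whether the flexible IP outlives the Load Balancer. A hypothetical clean-up that keeps the IP for reuse would look like this, with `api` and `newLB` taken from the earlier fragment.

// Delete the Load Balancer but keep its flexible IP in the account,
// so it can be attached to a future Load Balancer.
err = api.DeleteLB(&lb.ZonedAPIDeleteLBRequest{
	Zone:      scw.ZoneFrPar1,
	LBID:      newLB.ID,
	ReleaseIP: false,
})
if err != nil {
	log.Fatal(err)
}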
func (s *ZonedAPI) ListIPs(req *ZonedAPIListIPsRequest, opts ...scw.RequestOption) (*ListIPsResponse, error) { var err error @@ -1955,10 +4507,9 @@ func (s *ZonedAPI) ListIPs(req *ZonedAPIListIPsRequest, opts ...scw.RequestOptio } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips", + Query: query, } var resp ListIPsResponse @@ -1970,47 +4521,32 @@ func (s *ZonedAPI) ListIPs(req *ZonedAPIListIPsRequest, opts ...scw.RequestOptio return &resp, nil } -type ZonedAPICreateIPRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Deprecated: OrganizationID: organization ID of the Organization where the IP address should be created. - // Precisely one of OrganizationID, ProjectID must be set. - OrganizationID *string `json:"organization_id,omitempty"` - // ProjectID: project ID of the Project where the IP address should be created. - // Precisely one of OrganizationID, ProjectID must be set. - ProjectID *string `json:"project_id,omitempty"` - // Reverse: reverse DNS (domain name) for the IP address. - Reverse *string `json:"reverse"` -} - -// CreateIP: create an IP address. -// Create a new Load Balancer flexible IP address, in the specified Scaleway Project. This can be attached to new Load Balancers created in the future. +// CreateIP: Create a new Load Balancer flexible IP address, in the specified Scaleway Project. This can be attached to new Load Balancers created in the future. func (s *ZonedAPI) CreateIP(req *ZonedAPICreateIPRequest, opts ...scw.RequestOption) (*IP, error) { var err error - defaultProjectID, exist := s.client.GetDefaultProjectID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.ProjectID = &defaultProjectID - } - - defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.OrganizationID = &defaultOrganizationID - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProjectID, exist := s.client.GetDefaultProjectID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.ProjectID = &defaultProjectID + } + + defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.OrganizationID = &defaultOrganizationID + } + if fmt.Sprint(req.Zone) == "" { return nil, errors.New("field Zone cannot be empty in request") } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips", } err = scwReq.SetBody(req) @@ -2027,15 +4563,7 @@ func (s *ZonedAPI) CreateIP(req *ZonedAPICreateIPRequest, opts ...scw.RequestOpt return &resp, nil } -type ZonedAPIGetIPRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // IPID: IP address ID. - IPID string `json:"-"` -} - -// GetIP: get an IP address. -// Retrieve the full details of a Load Balancer flexible IP address. +// GetIP: Retrieve the full details of a Load Balancer flexible IP address. 
func (s *ZonedAPI) GetIP(req *ZonedAPIGetIPRequest, opts ...scw.RequestOption) (*IP, error) { var err error @@ -2053,9 +4581,8 @@ func (s *ZonedAPI) GetIP(req *ZonedAPIGetIPRequest, opts ...scw.RequestOption) ( } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "", } var resp IP @@ -2067,15 +4594,7 @@ func (s *ZonedAPI) GetIP(req *ZonedAPIGetIPRequest, opts ...scw.RequestOption) ( return &resp, nil } -type ZonedAPIReleaseIPRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // IPID: IP address ID. - IPID string `json:"-"` -} - -// ReleaseIP: delete an IP address. -// Delete a Load Balancer flexible IP address. This action is irreversible, and cannot be undone. +// ReleaseIP: Delete a Load Balancer flexible IP address. This action is irreversible, and cannot be undone. func (s *ZonedAPI) ReleaseIP(req *ZonedAPIReleaseIPRequest, opts ...scw.RequestOption) error { var err error @@ -2093,9 +4612,8 @@ func (s *ZonedAPI) ReleaseIP(req *ZonedAPIReleaseIPRequest, opts ...scw.RequestO } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -2105,17 +4623,7 @@ func (s *ZonedAPI) ReleaseIP(req *ZonedAPIReleaseIPRequest, opts ...scw.RequestO return nil } -type ZonedAPIUpdateIPRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // IPID: IP address ID. - IPID string `json:"-"` - // Reverse: reverse DNS (domain name) for the IP address. - Reverse *string `json:"reverse"` -} - -// UpdateIP: update an IP address. -// Update the reverse DNS of a Load Balancer flexible IP address. +// UpdateIP: Update the reverse DNS of a Load Balancer flexible IP address. func (s *ZonedAPI) UpdateIP(req *ZonedAPIUpdateIPRequest, opts ...scw.RequestOption) (*IP, error) { var err error @@ -2133,9 +4641,8 @@ func (s *ZonedAPI) UpdateIP(req *ZonedAPIUpdateIPRequest, opts ...scw.RequestOpt } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "", } err = scwReq.SetBody(req) @@ -2152,24 +4659,7 @@ func (s *ZonedAPI) UpdateIP(req *ZonedAPIUpdateIPRequest, opts ...scw.RequestOpt return &resp, nil } -type ZonedAPIListBackendsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: name of the backend to filter for. - Name *string `json:"-"` - // OrderBy: sort order of backends in the response. - // Default value: created_at_asc - OrderBy ListBackendsRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of backends to return. - PageSize *uint32 `json:"-"` -} - -// ListBackends: list the backends of a given Load Balancer. 
-// List all the backends of a Load Balancer, specified by its Load Balancer ID. By default, results are returned in ascending order by the creation date of each backend. The response is an array of backend objects, containing full details of each one including their configuration parameters such as protocol, port and forwarding algorithm. +// ListBackends: List all the backends of a Load Balancer, specified by its Load Balancer ID. By default, results are returned in ascending order by the creation date of each backend. The response is an array of backend objects, containing full details of each one including their configuration parameters such as protocol, port and forwarding algorithm. func (s *ZonedAPI) ListBackends(req *ZonedAPIListBackendsRequest, opts ...scw.RequestOption) (*ListBackendsResponse, error) { var err error @@ -2198,10 +4688,9 @@ func (s *ZonedAPI) ListBackends(req *ZonedAPIListBackendsRequest, opts ...scw.Re } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/backends", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/backends", + Query: query, } var resp ListBackendsResponse @@ -2213,105 +4702,7 @@ func (s *ZonedAPI) ListBackends(req *ZonedAPIListBackendsRequest, opts ...scw.Re return &resp, nil } -type ZonedAPICreateBackendRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: name for the backend. - Name string `json:"name"` - // ForwardProtocol: protocol to be used by the backend when forwarding traffic to backend servers. - // Default value: tcp - ForwardProtocol Protocol `json:"forward_protocol"` - // ForwardPort: port to be used by the backend when forwarding traffic to backend servers. - ForwardPort int32 `json:"forward_port"` - // ForwardPortAlgorithm: load balancing algorithm to be used when determining which backend server to forward new traffic to. - // Default value: roundrobin - ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` - // StickySessions: defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie TO stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. - // Default value: none - StickySessions StickySessionsType `json:"sticky_sessions"` - // StickySessionsCookieName: cookie name for cookie-based sticky sessions. - StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` - // HealthCheck: object defining the health check to be carried out by the backend when checking the status and health of backend servers. - HealthCheck *HealthCheck `json:"health_check"` - // ServerIP: list of backend server IP addresses (IPv4 or IPv6) the backend should forward traffic to. - ServerIP []string `json:"server_ip"` - // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. - SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` - // TimeoutServer: maximum allowed time for a backend server to process a request. - // Default value: 300000 - TimeoutServer *time.Duration `json:"timeout_server"` - // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. 
- // Default value: 5000 - TimeoutConnect *time.Duration `json:"timeout_connect"` - // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). - // Default value: 900000 - TimeoutTunnel *time.Duration `json:"timeout_tunnel"` - // OnMarkedDownAction: action to take when a backend server is marked as down. - // Default value: on_marked_down_action_none - OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` - // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. The PROXY protocol must be supported by the backend servers' software. - // Default value: proxy_protocol_unknown - ProxyProtocol ProxyProtocol `json:"proxy_protocol"` - // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. - FailoverHost *string `json:"failover_host"` - // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. - SslBridging *bool `json:"ssl_bridging"` - // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. - IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify"` - // RedispatchAttemptCount: whether to use another backend server on each attempt. - RedispatchAttemptCount *int32 `json:"redispatch_attempt_count"` - // MaxRetries: number of retries when a backend server connection failed. - MaxRetries *int32 `json:"max_retries"` - // MaxConnections: maximum number of connections allowed per backend server. - MaxConnections *int32 `json:"max_connections"` - // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. - TimeoutQueue *scw.Duration `json:"timeout_queue"` -} - -func (m *ZonedAPICreateBackendRequest) UnmarshalJSON(b []byte) error { - type tmpType ZonedAPICreateBackendRequest - tmp := struct { - tmpType - - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` - TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` - TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = ZonedAPICreateBackendRequest(tmp.tmpType) - - m.TimeoutServer = tmp.TmpTimeoutServer.Standard() - m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() - m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() - return nil -} - -func (m ZonedAPICreateBackendRequest) MarshalJSON() ([]byte, error) { - type tmpType ZonedAPICreateBackendRequest - tmp := struct { - tmpType - - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` - TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` - TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` - }{ - tmpType: tmpType(m), - - TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), - TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), - TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), - } - return json.Marshal(tmp) -} - -// CreateBackend: create a backend for a given Load Balancer. -// Create a new backend for a given Load Balancer, specifying its full configuration including protocol, port and forwarding algorithm. +// CreateBackend: Create a new backend for a given Load Balancer, specifying its full configuration including protocol, port and forwarding algorithm. 
func (s *ZonedAPI) CreateBackend(req *ZonedAPICreateBackendRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -2333,9 +4724,8 @@ func (s *ZonedAPI) CreateBackend(req *ZonedAPICreateBackendRequest, opts ...scw. } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/backends", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/backends", } err = scwReq.SetBody(req) @@ -2352,15 +4742,7 @@ func (s *ZonedAPI) CreateBackend(req *ZonedAPICreateBackendRequest, opts ...scw. return &resp, nil } -type ZonedAPIGetBackendRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` -} - -// GetBackend: get a backend of a given Load Balancer. -// Get the full details of a given backend, specified by its backend ID. The response contains the backend's full configuration parameters including protocol, port and forwarding algorithm. +// GetBackend: Get the full details of a given backend, specified by its backend ID. The response contains the backend's full configuration parameters including protocol, port and forwarding algorithm. func (s *ZonedAPI) GetBackend(req *ZonedAPIGetBackendRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -2378,9 +4760,8 @@ func (s *ZonedAPI) GetBackend(req *ZonedAPIGetBackendRequest, opts ...scw.Reques } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "", } var resp Backend @@ -2392,101 +4773,7 @@ func (s *ZonedAPI) GetBackend(req *ZonedAPIGetBackendRequest, opts ...scw.Reques return &resp, nil } -type ZonedAPIUpdateBackendRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // Name: backend name. - Name string `json:"name"` - // ForwardProtocol: protocol to be used by the backend when forwarding traffic to backend servers. - // Default value: tcp - ForwardProtocol Protocol `json:"forward_protocol"` - // ForwardPort: port to be used by the backend when forwarding traffic to backend servers. - ForwardPort int32 `json:"forward_port"` - // ForwardPortAlgorithm: load balancing algorithm to be used when determining which backend server to forward new traffic to. - // Default value: roundrobin - ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` - // StickySessions: defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie to stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. - // Default value: none - StickySessions StickySessionsType `json:"sticky_sessions"` - // StickySessionsCookieName: cookie name for cookie-based sticky sessions. - StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` - // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. 
- SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` - // TimeoutServer: maximum allowed time for a backend server to process a request. - // Default value: 300000 - TimeoutServer *time.Duration `json:"timeout_server"` - // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. - // Default value: 5000 - TimeoutConnect *time.Duration `json:"timeout_connect"` - // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). - // Default value: 900000 - TimeoutTunnel *time.Duration `json:"timeout_tunnel"` - // OnMarkedDownAction: action to take when a backend server is marked as down. - // Default value: on_marked_down_action_none - OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` - // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. The PROXY protocol must be supported by the backend servers' software. - // Default value: proxy_protocol_unknown - ProxyProtocol ProxyProtocol `json:"proxy_protocol"` - // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. - FailoverHost *string `json:"failover_host"` - // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. - SslBridging *bool `json:"ssl_bridging"` - // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. - IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify"` - // RedispatchAttemptCount: whether to use another backend server on each attempt. - RedispatchAttemptCount *int32 `json:"redispatch_attempt_count"` - // MaxRetries: number of retries when a backend server connection failed. - MaxRetries *int32 `json:"max_retries"` - // MaxConnections: maximum number of connections allowed per backend server. - MaxConnections *int32 `json:"max_connections"` - // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. - TimeoutQueue *scw.Duration `json:"timeout_queue"` -} - -func (m *ZonedAPIUpdateBackendRequest) UnmarshalJSON(b []byte) error { - type tmpType ZonedAPIUpdateBackendRequest - tmp := struct { - tmpType - - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` - TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` - TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = ZonedAPIUpdateBackendRequest(tmp.tmpType) - - m.TimeoutServer = tmp.TmpTimeoutServer.Standard() - m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() - m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() - return nil -} - -func (m ZonedAPIUpdateBackendRequest) MarshalJSON() ([]byte, error) { - type tmpType ZonedAPIUpdateBackendRequest - tmp := struct { - tmpType - - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` - TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` - TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` - }{ - tmpType: tmpType(m), - - TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), - TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), - TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), - } - return json.Marshal(tmp) -} - -// UpdateBackend: update a backend of a given Load Balancer. 
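// --- Editorial note (not part of the vendored diff) ---------------------
// The (Un)MarshalJSON helpers removed above all follow one pattern: embed
// the request in a temporary type, shadow each *time.Duration field with a
// wire-format wrapper, and copy the values across. A self-contained sketch
// of that pattern follows; wireDuration and createBackendBody are
// illustrative stand-ins, not SDK types.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// wireDuration marshals a duration as {"seconds":..., "nanos":...},
// roughly what the SDK's internal marshaler.Duration produces.
type wireDuration struct {
	Seconds int64 `json:"seconds"`
	Nanos   int32 `json:"nanos"`
}

func newWireDuration(d *time.Duration) *wireDuration {
	if d == nil {
		return nil
	}
	return &wireDuration{Seconds: int64(*d / time.Second), Nanos: int32(*d % time.Second)}
}

type createBackendBody struct {
	Name          string         `json:"name"`
	TimeoutServer *time.Duration `json:"timeout_server"`
}

func (b createBackendBody) MarshalJSON() ([]byte, error) {
	// Defining a local named type drops the MarshalJSON method and avoids
	// infinite recursion, exactly as the tmpType trick above does.
	type tmpType createBackendBody
	tmp := struct {
		tmpType
		// Shallower field with the same JSON tag wins, replacing the
		// embedded *time.Duration representation on the wire.
		TmpTimeoutServer *wireDuration `json:"timeout_server"`
	}{
		tmpType:          tmpType(b),
		TmpTimeoutServer: newWireDuration(b.TimeoutServer),
	}
	return json.Marshal(tmp)
}

func main() {
	d := 300 * time.Second
	out, _ := json.Marshal(createBackendBody{Name: "web", TimeoutServer: &d})
	fmt.Println(string(out)) // {"name":"web","timeout_server":{"seconds":300,"nanos":0}}
}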
-// Update a backend of a given Load Balancer, specified by its backend ID. Note that the request type is PUT and not PATCH. You must set all parameters. +// UpdateBackend: Update a backend of a given Load Balancer, specified by its backend ID. Note that the request type is PUT and not PATCH. You must set all parameters. func (s *ZonedAPI) UpdateBackend(req *ZonedAPIUpdateBackendRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -2504,9 +4791,8 @@ func (s *ZonedAPI) UpdateBackend(req *ZonedAPIUpdateBackendRequest, opts ...scw. } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "", } err = scwReq.SetBody(req) @@ -2523,15 +4809,7 @@ func (s *ZonedAPI) UpdateBackend(req *ZonedAPIUpdateBackendRequest, opts ...scw. return &resp, nil } -type ZonedAPIDeleteBackendRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // BackendID: ID of the backend to delete. - BackendID string `json:"-"` -} - -// DeleteBackend: delete a backend of a given Load Balancer. -// Delete a backend of a given Load Balancer, specified by its backend ID. This action is irreversible and cannot be undone. +// DeleteBackend: Delete a backend of a given Load Balancer, specified by its backend ID. This action is irreversible and cannot be undone. func (s *ZonedAPI) DeleteBackend(req *ZonedAPIDeleteBackendRequest, opts ...scw.RequestOption) error { var err error @@ -2549,9 +4827,8 @@ func (s *ZonedAPI) DeleteBackend(req *ZonedAPIDeleteBackendRequest, opts ...scw. } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -2561,17 +4838,7 @@ func (s *ZonedAPI) DeleteBackend(req *ZonedAPIDeleteBackendRequest, opts ...scw. return nil } -type ZonedAPIAddBackendServersRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // ServerIP: list of IP addresses to add to backend servers. - ServerIP []string `json:"server_ip"` -} - -// AddBackendServers: add a set of backend servers to a given backend. -// For a given backend specified by its backend ID, add a set of backend servers (identified by their IP addresses) it should forward traffic to. These will be appended to any existing set of backend servers for this backend. +// AddBackendServers: For a given backend specified by its backend ID, add a set of backend servers (identified by their IP addresses) it should forward traffic to. These will be appended to any existing set of backend servers for this backend. 
func (s *ZonedAPI) AddBackendServers(req *ZonedAPIAddBackendServersRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -2589,9 +4856,8 @@ func (s *ZonedAPI) AddBackendServers(req *ZonedAPIAddBackendServersRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", } err = scwReq.SetBody(req) @@ -2608,17 +4874,7 @@ func (s *ZonedAPI) AddBackendServers(req *ZonedAPIAddBackendServersRequest, opts return &resp, nil } -type ZonedAPIRemoveBackendServersRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // ServerIP: list of IP addresses to remove from backend servers. - ServerIP []string `json:"server_ip"` -} - -// RemoveBackendServers: remove a set of servers for a given backend. -// For a given backend specified by its backend ID, remove the specified backend servers (identified by their IP addresses) so that it no longer forwards traffic to them. +// RemoveBackendServers: For a given backend specified by its backend ID, remove the specified backend servers (identified by their IP addresses) so that it no longer forwards traffic to them. func (s *ZonedAPI) RemoveBackendServers(req *ZonedAPIRemoveBackendServersRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -2636,9 +4892,8 @@ func (s *ZonedAPI) RemoveBackendServers(req *ZonedAPIRemoveBackendServersRequest } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", } err = scwReq.SetBody(req) @@ -2655,17 +4910,7 @@ func (s *ZonedAPI) RemoveBackendServers(req *ZonedAPIRemoveBackendServersRequest return &resp, nil } -type ZonedAPISetBackendServersRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // ServerIP: list of IP addresses for backend servers. Any other existing backend servers will be removed. - ServerIP []string `json:"server_ip"` -} - -// SetBackendServers: define all backend servers for a given backend. -// For a given backend specified by its backend ID, define the set of backend servers (identified by their IP addresses) that it should forward traffic to. Any existing backend servers configured for this backend will be removed. +// SetBackendServers: For a given backend specified by its backend ID, define the set of backend servers (identified by their IP addresses) that it should forward traffic to. Any existing backend servers configured for this backend will be removed. 
func (s *ZonedAPI) SetBackendServers(req *ZonedAPISetBackendServersRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -2683,9 +4928,8 @@ func (s *ZonedAPI) SetBackendServers(req *ZonedAPISetBackendServersRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", } err = scwReq.SetBody(req) @@ -2702,85 +4946,7 @@ func (s *ZonedAPI) SetBackendServers(req *ZonedAPISetBackendServersRequest, opts return &resp, nil } -type ZonedAPIUpdateHealthCheckRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // Port: port to use for the backend server health check. - Port int32 `json:"port"` - // CheckDelay: time to wait between two consecutive health checks. - CheckDelay *time.Duration `json:"check_delay"` - // CheckTimeout: maximum time a backend server has to reply to the health check. - CheckTimeout *time.Duration `json:"check_timeout"` - // CheckMaxRetries: number of consecutive unsuccessful health checks after which the server will be considered dead. - CheckMaxRetries int32 `json:"check_max_retries"` - // CheckSendProxy: defines whether proxy protocol should be activated for the health check. - CheckSendProxy bool `json:"check_send_proxy"` - // TCPConfig: object to configure a basic TCP health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - TCPConfig *HealthCheckTCPConfig `json:"tcp_config,omitempty"` - // MysqlConfig: object to configure a MySQL health check. The check requires MySQL >=3.22, for older versions, use a TCP health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - MysqlConfig *HealthCheckMysqlConfig `json:"mysql_config,omitempty"` - // PgsqlConfig: object to configure a PostgreSQL health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - PgsqlConfig *HealthCheckPgsqlConfig `json:"pgsql_config,omitempty"` - // LdapConfig: object to configure an LDAP health check. The response is analyzed to find the LDAPv3 response message. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - LdapConfig *HealthCheckLdapConfig `json:"ldap_config,omitempty"` - // RedisConfig: object to configure a Redis health check. The response is analyzed to find the +PONG response message. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - RedisConfig *HealthCheckRedisConfig `json:"redis_config,omitempty"` - // HTTPConfig: object to configure an HTTP health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - HTTPConfig *HealthCheckHTTPConfig `json:"http_config,omitempty"` - // HTTPSConfig: object to configure an HTTPS health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. 
- HTTPSConfig *HealthCheckHTTPSConfig `json:"https_config,omitempty"` - // TransientCheckDelay: time to wait between two consecutive health checks when a backend server is in a transient state (going UP or DOWN). - // Default value: 0.5s - TransientCheckDelay *scw.Duration `json:"transient_check_delay"` -} - -func (m *ZonedAPIUpdateHealthCheckRequest) UnmarshalJSON(b []byte) error { - type tmpType ZonedAPIUpdateHealthCheckRequest - tmp := struct { - tmpType - - TmpCheckDelay *marshaler.Duration `json:"check_delay"` - TmpCheckTimeout *marshaler.Duration `json:"check_timeout"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = ZonedAPIUpdateHealthCheckRequest(tmp.tmpType) - - m.CheckDelay = tmp.TmpCheckDelay.Standard() - m.CheckTimeout = tmp.TmpCheckTimeout.Standard() - return nil -} - -func (m ZonedAPIUpdateHealthCheckRequest) MarshalJSON() ([]byte, error) { - type tmpType ZonedAPIUpdateHealthCheckRequest - tmp := struct { - tmpType - - TmpCheckDelay *marshaler.Duration `json:"check_delay"` - TmpCheckTimeout *marshaler.Duration `json:"check_timeout"` - }{ - tmpType: tmpType(m), - - TmpCheckDelay: marshaler.NewDuration(m.CheckDelay), - TmpCheckTimeout: marshaler.NewDuration(m.CheckTimeout), - } - return json.Marshal(tmp) -} - -// UpdateHealthCheck: update a health check for a given backend. -// Update the configuration of the health check performed by a given backend to verify the health of its backend servers, identified by its backend ID. Note that the request type is PUT and not PATCH. You must set all parameters. +// UpdateHealthCheck: Update the configuration of the health check performed by a given backend to verify the health of its backend servers, identified by its backend ID. Note that the request type is PUT and not PATCH. You must set all parameters. func (s *ZonedAPI) UpdateHealthCheck(req *ZonedAPIUpdateHealthCheckRequest, opts ...scw.RequestOption) (*HealthCheck, error) { var err error @@ -2798,9 +4964,8 @@ func (s *ZonedAPI) UpdateHealthCheck(req *ZonedAPIUpdateHealthCheckRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "/healthcheck", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/backends/" + fmt.Sprint(req.BackendID) + "/healthcheck", } err = scwReq.SetBody(req) @@ -2817,24 +4982,7 @@ func (s *ZonedAPI) UpdateHealthCheck(req *ZonedAPIUpdateHealthCheckRequest, opts return &resp, nil } -type ZonedAPIListFrontendsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: name of the frontend to filter for. - Name *string `json:"-"` - // OrderBy: sort order of frontends in the response. - // Default value: created_at_asc - OrderBy ListFrontendsRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of frontends to return. - PageSize *uint32 `json:"-"` -} - -// ListFrontends: list frontends of a given Load Balancer. -// List all the frontends of a Load Balancer, specified by its Load Balancer ID. By default, results are returned in ascending order by the creation date of each frontend. The response is an array of frontend objects, containing full details of each one including the port they listen on and the backend they are attached to. 
+// ListFrontends: List all the frontends of a Load Balancer, specified by its Load Balancer ID. By default, results are returned in ascending order by the creation date of each frontend. The response is an array of frontend objects, containing full details of each one including the port they listen on and the backend they are attached to. func (s *ZonedAPI) ListFrontends(req *ZonedAPIListFrontendsRequest, opts ...scw.RequestOption) (*ListFrontendsResponse, error) { var err error @@ -2863,10 +5011,9 @@ func (s *ZonedAPI) ListFrontends(req *ZonedAPIListFrontendsRequest, opts ...scw. } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/frontends", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/frontends", + Query: query, } var resp ListFrontendsResponse @@ -2878,62 +5025,7 @@ func (s *ZonedAPI) ListFrontends(req *ZonedAPIListFrontendsRequest, opts ...scw. return &resp, nil } -type ZonedAPICreateFrontendRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID (ID of the Load Balancer to attach the frontend to). - LBID string `json:"-"` - // Name: name for the frontend. - Name string `json:"name"` - // InboundPort: port the frontend should listen on. - InboundPort int32 `json:"inbound_port"` - // BackendID: backend ID (ID of the backend the frontend should pass traffic to). - BackendID string `json:"backend_id"` - // TimeoutClient: maximum allowed inactivity time on the client side. - // Default value: 300000 - TimeoutClient *time.Duration `json:"timeout_client"` - // Deprecated: CertificateID: certificate ID, deprecated in favor of certificate_ids array. - CertificateID *string `json:"certificate_id,omitempty"` - // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. - CertificateIDs *[]string `json:"certificate_ids"` - // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. - EnableHTTP3 bool `json:"enable_http3"` -} - -func (m *ZonedAPICreateFrontendRequest) UnmarshalJSON(b []byte) error { - type tmpType ZonedAPICreateFrontendRequest - tmp := struct { - tmpType - - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = ZonedAPICreateFrontendRequest(tmp.tmpType) - - m.TimeoutClient = tmp.TmpTimeoutClient.Standard() - return nil -} - -func (m ZonedAPICreateFrontendRequest) MarshalJSON() ([]byte, error) { - type tmpType ZonedAPICreateFrontendRequest - tmp := struct { - tmpType - - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` - }{ - tmpType: tmpType(m), - - TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), - } - return json.Marshal(tmp) -} - -// CreateFrontend: create a frontend in a given Load Balancer. -// Create a new frontend for a given Load Balancer, specifying its configuration including the port it should listen on and the backend to attach it to. +// CreateFrontend: Create a new frontend for a given Load Balancer, specifying its configuration including the port it should listen on and the backend to attach it to. 
func (s *ZonedAPI) CreateFrontend(req *ZonedAPICreateFrontendRequest, opts ...scw.RequestOption) (*Frontend, error) { var err error @@ -2955,9 +5047,8 @@ func (s *ZonedAPI) CreateFrontend(req *ZonedAPICreateFrontendRequest, opts ...sc } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/frontends", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/frontends", } err = scwReq.SetBody(req) @@ -2974,15 +5065,7 @@ func (s *ZonedAPI) CreateFrontend(req *ZonedAPICreateFrontendRequest, opts ...sc return &resp, nil } -type ZonedAPIGetFrontendRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // FrontendID: frontend ID. - FrontendID string `json:"-"` -} - -// GetFrontend: get a frontend. -// Get the full details of a given frontend, specified by its frontend ID. The response contains the frontend's full configuration parameters including the backend it is attached to, the port it listens on, and any certificates it has. +// GetFrontend: Get the full details of a given frontend, specified by its frontend ID. The response contains the frontend's full configuration parameters including the backend it is attached to, the port it listens on, and any certificates it has. func (s *ZonedAPI) GetFrontend(req *ZonedAPIGetFrontendRequest, opts ...scw.RequestOption) (*Frontend, error) { var err error @@ -3000,9 +5083,8 @@ func (s *ZonedAPI) GetFrontend(req *ZonedAPIGetFrontendRequest, opts ...scw.Requ } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", } var resp Frontend @@ -3014,62 +5096,7 @@ func (s *ZonedAPI) GetFrontend(req *ZonedAPIGetFrontendRequest, opts ...scw.Requ return &resp, nil } -type ZonedAPIUpdateFrontendRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // FrontendID: frontend ID. - FrontendID string `json:"-"` - // Name: frontend name. - Name string `json:"name"` - // InboundPort: port the frontend should listen on. - InboundPort int32 `json:"inbound_port"` - // BackendID: backend ID (ID of the backend the frontend should pass traffic to). - BackendID string `json:"backend_id"` - // TimeoutClient: maximum allowed inactivity time on the client side. - // Default value: 300000 - TimeoutClient *time.Duration `json:"timeout_client"` - // Deprecated: CertificateID: certificate ID, deprecated in favor of certificate_ids array. - CertificateID *string `json:"certificate_id,omitempty"` - // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. - CertificateIDs *[]string `json:"certificate_ids"` - // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. 
- EnableHTTP3 bool `json:"enable_http3"` -} - -func (m *ZonedAPIUpdateFrontendRequest) UnmarshalJSON(b []byte) error { - type tmpType ZonedAPIUpdateFrontendRequest - tmp := struct { - tmpType - - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = ZonedAPIUpdateFrontendRequest(tmp.tmpType) - - m.TimeoutClient = tmp.TmpTimeoutClient.Standard() - return nil -} - -func (m ZonedAPIUpdateFrontendRequest) MarshalJSON() ([]byte, error) { - type tmpType ZonedAPIUpdateFrontendRequest - tmp := struct { - tmpType - - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` - }{ - tmpType: tmpType(m), - - TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), - } - return json.Marshal(tmp) -} - -// UpdateFrontend: update a frontend. -// Update a given frontend, specified by its frontend ID. You can update configuration parameters including its name and the port it listens on. Note that the request type is PUT and not PATCH. You must set all parameters. +// UpdateFrontend: Update a given frontend, specified by its frontend ID. You can update configuration parameters including its name and the port it listens on. Note that the request type is PUT and not PATCH. You must set all parameters. func (s *ZonedAPI) UpdateFrontend(req *ZonedAPIUpdateFrontendRequest, opts ...scw.RequestOption) (*Frontend, error) { var err error @@ -3087,9 +5114,8 @@ func (s *ZonedAPI) UpdateFrontend(req *ZonedAPIUpdateFrontendRequest, opts ...sc } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", } err = scwReq.SetBody(req) @@ -3106,15 +5132,7 @@ func (s *ZonedAPI) UpdateFrontend(req *ZonedAPIUpdateFrontendRequest, opts ...sc return &resp, nil } -type ZonedAPIDeleteFrontendRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // FrontendID: ID of the frontend to delete. - FrontendID string `json:"-"` -} - -// DeleteFrontend: delete a frontend. -// Delete a given frontend, specified by its frontend ID. This action is irreversible and cannot be undone. +// DeleteFrontend: Delete a given frontend, specified by its frontend ID. This action is irreversible and cannot be undone. func (s *ZonedAPI) DeleteFrontend(req *ZonedAPIDeleteFrontendRequest, opts ...scw.RequestOption) error { var err error @@ -3132,9 +5150,8 @@ func (s *ZonedAPI) DeleteFrontend(req *ZonedAPIDeleteFrontendRequest, opts ...sc } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -3144,22 +5161,7 @@ func (s *ZonedAPI) DeleteFrontend(req *ZonedAPIDeleteFrontendRequest, opts ...sc return nil } -type ZonedAPIListRoutesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // OrderBy: sort order of routes in the response. - // Default value: created_at_asc - OrderBy ListRoutesRequestOrderBy `json:"-"` - // PageSize: the number of route objects to return. 
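// --- Editorial note (not part of the vendored diff) ---------------------
// CreateFrontend/UpdateFrontend above bind a listening port to an existing
// backend. A minimal sketch, assuming a client with a default zone and a
// backend created beforehand; both IDs are placeholders.
package main

import (
	"log"
	"time"

	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"), // placeholders
		scw.WithDefaultZone(scw.ZoneFrPar1),
	)
	if err != nil {
		log.Fatal(err)
	}
	api := lb.NewZonedAPI(client)

	// Expose port 443 on the Load Balancer and forward traffic to a backend.
	timeoutClient := 30 * time.Second
	frontend, err := api.CreateFrontend(&lb.ZonedAPICreateFrontendRequest{
		LBID:          "11111111-1111-1111-1111-111111111111", // placeholder Load Balancer ID
		Name:          "https-frontend",
		InboundPort:   443,
		BackendID:     "22222222-2222-2222-2222-222222222222", // placeholder backend ID
		TimeoutClient: &timeoutClient,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created frontend %s", frontend.ID)
}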
- PageSize *uint32 `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // FrontendID: frontend ID to filter for, only Routes from this Frontend will be returned. - FrontendID *string `json:"-"` -} - -// ListRoutes: list all routes. -// List all routes for a given frontend. The response is an array of routes, each one with a specified backend to direct to if a certain condition is matched (based on the value of the SNI field or HTTP Host header). +// ListRoutes: List all routes for a given frontend. The response is an array of routes, each one with a specified backend to direct to if a certain condition is matched (based on the value of the SNI field or HTTP Host header). func (s *ZonedAPI) ListRoutes(req *ZonedAPIListRoutesRequest, opts ...scw.RequestOption) (*ListRoutesResponse, error) { var err error @@ -3184,10 +5186,9 @@ func (s *ZonedAPI) ListRoutes(req *ZonedAPIListRoutesRequest, opts ...scw.Reques } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes", + Query: query, } var resp ListRoutesResponse @@ -3199,19 +5200,7 @@ func (s *ZonedAPI) ListRoutes(req *ZonedAPIListRoutesRequest, opts ...scw.Reques return &resp, nil } -type ZonedAPICreateRouteRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // FrontendID: ID of the source frontend to create the route on. - FrontendID string `json:"frontend_id"` - // BackendID: ID of the target backend for the route. - BackendID string `json:"backend_id"` - // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. - Match *RouteMatch `json:"match"` -} - -// CreateRoute: create a route. -// Create a new route on a given frontend. To configure a route, specify the backend to direct to if a certain condition is matched (based on the value of the SNI field or HTTP Host header). +// CreateRoute: Create a new route on a given frontend. To configure a route, specify the backend to direct to if a certain condition is matched (based on the value of the SNI field or HTTP Host header). func (s *ZonedAPI) CreateRoute(req *ZonedAPICreateRouteRequest, opts ...scw.RequestOption) (*Route, error) { var err error @@ -3225,9 +5214,8 @@ func (s *ZonedAPI) CreateRoute(req *ZonedAPICreateRouteRequest, opts ...scw.Requ } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes", } err = scwReq.SetBody(req) @@ -3244,15 +5232,7 @@ func (s *ZonedAPI) CreateRoute(req *ZonedAPICreateRouteRequest, opts ...scw.Requ return &resp, nil } -type ZonedAPIGetRouteRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // RouteID: route ID. - RouteID string `json:"-"` -} - -// GetRoute: get a route. -// Retrieve information about an existing route, specified by its route ID. Its full details, origin frontend, target backend and match condition, are returned in the response object. +// GetRoute: Retrieve information about an existing route, specified by its route ID. 
Its full details, origin frontend, target backend and match condition, are returned in the response object. func (s *ZonedAPI) GetRoute(req *ZonedAPIGetRouteRequest, opts ...scw.RequestOption) (*Route, error) { var err error @@ -3270,9 +5250,8 @@ func (s *ZonedAPI) GetRoute(req *ZonedAPIGetRouteRequest, opts ...scw.RequestOpt } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes/" + fmt.Sprint(req.RouteID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes/" + fmt.Sprint(req.RouteID) + "", } var resp Route @@ -3284,19 +5263,7 @@ func (s *ZonedAPI) GetRoute(req *ZonedAPIGetRouteRequest, opts ...scw.RequestOpt return &resp, nil } -type ZonedAPIUpdateRouteRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // RouteID: route ID. - RouteID string `json:"-"` - // BackendID: ID of the target backend for the route. - BackendID string `json:"backend_id"` - // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. - Match *RouteMatch `json:"match"` -} - -// UpdateRoute: update a route. -// Update the configuration of an existing route, specified by its route ID. +// UpdateRoute: Update the configuration of an existing route, specified by its route ID. func (s *ZonedAPI) UpdateRoute(req *ZonedAPIUpdateRouteRequest, opts ...scw.RequestOption) (*Route, error) { var err error @@ -3314,9 +5281,8 @@ func (s *ZonedAPI) UpdateRoute(req *ZonedAPIUpdateRouteRequest, opts ...scw.Requ } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes/" + fmt.Sprint(req.RouteID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes/" + fmt.Sprint(req.RouteID) + "", } err = scwReq.SetBody(req) @@ -3333,15 +5299,7 @@ func (s *ZonedAPI) UpdateRoute(req *ZonedAPIUpdateRouteRequest, opts ...scw.Requ return &resp, nil } -type ZonedAPIDeleteRouteRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // RouteID: route ID. - RouteID string `json:"-"` -} - -// DeleteRoute: delete a route. -// Delete an existing route, specified by its route ID. Deleting a route is permanent, and cannot be undone. +// DeleteRoute: Delete an existing route, specified by its route ID. Deleting a route is permanent, and cannot be undone. func (s *ZonedAPI) DeleteRoute(req *ZonedAPIDeleteRouteRequest, opts ...scw.RequestOption) error { var err error @@ -3359,9 +5317,8 @@ func (s *ZonedAPI) DeleteRoute(req *ZonedAPIDeleteRouteRequest, opts ...scw.Requ } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes/" + fmt.Sprint(req.RouteID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/routes/" + fmt.Sprint(req.RouteID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -3371,16 +5328,7 @@ func (s *ZonedAPI) DeleteRoute(req *ZonedAPIDeleteRouteRequest, opts ...scw.Requ return nil } -type ZonedAPIGetLBStatsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. 
- LBID string `json:"-"` - // BackendID: ID of the backend. - BackendID *string `json:"-"` -} - -// Deprecated: GetLBStats: get usage statistics of a given Load Balancer. +// Deprecated: GetLBStats: Get usage statistics of a given Load Balancer. func (s *ZonedAPI) GetLBStats(req *ZonedAPIGetLBStatsRequest, opts ...scw.RequestOption) (*LBStats, error) { var err error @@ -3401,10 +5349,9 @@ func (s *ZonedAPI) GetLBStats(req *ZonedAPIGetLBStatsRequest, opts ...scw.Reques } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/stats", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/stats", + Query: query, } var resp LBStats @@ -3416,21 +5363,7 @@ func (s *ZonedAPI) GetLBStats(req *ZonedAPIGetLBStatsRequest, opts ...scw.Reques return &resp, nil } -type ZonedAPIListBackendStatsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of items to return. - PageSize *uint32 `json:"-"` - // BackendID: ID of the backend. - BackendID *string `json:"-"` -} - -// ListBackendStats: list backend server statistics. -// List information about your backend servers, including their state and the result of their last health check. +// ListBackendStats: List information about your backend servers, including their state and the result of their last health check. func (s *ZonedAPI) ListBackendStats(req *ZonedAPIListBackendStatsRequest, opts ...scw.RequestOption) (*ListBackendStatsResponse, error) { var err error @@ -3458,10 +5391,9 @@ func (s *ZonedAPI) ListBackendStats(req *ZonedAPIListBackendStatsRequest, opts . } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/backend-stats", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/backend-stats", + Query: query, } var resp ListBackendStatsResponse @@ -3473,24 +5405,7 @@ func (s *ZonedAPI) ListBackendStats(req *ZonedAPIListBackendStatsRequest, opts . return &resp, nil } -type ZonedAPIListACLsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // FrontendID: frontend ID (ACLs attached to this frontend will be returned in the response). - FrontendID string `json:"-"` - // OrderBy: sort order of ACLs in the response. - // Default value: created_at_asc - OrderBy ListACLRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: the number of ACLs to return. - PageSize *uint32 `json:"-"` - // Name: ACL name to filter for. - Name *string `json:"-"` -} - -// ListACLs: list ACLs for a given frontend. -// List the ACLs for a given frontend, specified by its frontend ID. The response is an array of ACL objects, each one representing an ACL that denies or allows traffic based on certain conditions. +// ListACLs: List the ACLs for a given frontend, specified by its frontend ID. The response is an array of ACL objects, each one representing an ACL that denies or allows traffic based on certain conditions. 
func (s *ZonedAPI) ListACLs(req *ZonedAPIListACLsRequest, opts ...scw.RequestOption) (*ListACLResponse, error) { var err error @@ -3519,10 +5434,9 @@ func (s *ZonedAPI) ListACLs(req *ZonedAPIListACLsRequest, opts ...scw.RequestOpt } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", + Query: query, } var resp ListACLResponse @@ -3534,25 +5448,7 @@ func (s *ZonedAPI) ListACLs(req *ZonedAPIListACLsRequest, opts ...scw.RequestOpt return &resp, nil } -type ZonedAPICreateACLRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // FrontendID: frontend ID to attach the ACL to. - FrontendID string `json:"-"` - // Name: ACL name. - Name string `json:"name"` - // Action: action to take when incoming traffic matches an ACL filter. - Action *ACLAction `json:"action"` - // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. - Match *ACLMatch `json:"match"` - // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). - Index int32 `json:"index"` - // Description: ACL description. - Description string `json:"description"` -} - -// CreateACL: create an ACL for a given frontend. -// Create a new ACL for a given frontend. Each ACL must have a name, an action to perform (allow or deny), and a match rule (the action is carried out when the incoming traffic matches the rule). +// CreateACL: Create a new ACL for a given frontend. Each ACL must have a name, an action to perform (allow or deny), and a match rule (the action is carried out when the incoming traffic matches the rule). func (s *ZonedAPI) CreateACL(req *ZonedAPICreateACLRequest, opts ...scw.RequestOption) (*ACL, error) { var err error @@ -3574,9 +5470,8 @@ func (s *ZonedAPI) CreateACL(req *ZonedAPICreateACLRequest, opts ...scw.RequestO } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", } err = scwReq.SetBody(req) @@ -3593,15 +5488,7 @@ func (s *ZonedAPI) CreateACL(req *ZonedAPICreateACLRequest, opts ...scw.RequestO return &resp, nil } -type ZonedAPIGetACLRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ACLID: ACL ID. - ACLID string `json:"-"` -} - -// GetACL: get an ACL. -// Get information for a particular ACL, specified by its ACL ID. The response returns full details of the ACL, including its name, action, match rule and frontend. +// GetACL: Get information for a particular ACL, specified by its ACL ID. The response returns full details of the ACL, including its name, action, match rule and frontend. 
func (s *ZonedAPI) GetACL(req *ZonedAPIGetACLRequest, opts ...scw.RequestOption) (*ACL, error) { var err error @@ -3619,9 +5506,8 @@ func (s *ZonedAPI) GetACL(req *ZonedAPIGetACLRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/acls/" + fmt.Sprint(req.ACLID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/acls/" + fmt.Sprint(req.ACLID) + "", } var resp ACL @@ -3633,25 +5519,7 @@ func (s *ZonedAPI) GetACL(req *ZonedAPIGetACLRequest, opts ...scw.RequestOption) return &resp, nil } -type ZonedAPIUpdateACLRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ACLID: ACL ID. - ACLID string `json:"-"` - // Name: ACL name. - Name string `json:"name"` - // Action: action to take when incoming traffic matches an ACL filter. - Action *ACLAction `json:"action"` - // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. - Match *ACLMatch `json:"match"` - // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). - Index int32 `json:"index"` - // Description: ACL description. - Description *string `json:"description"` -} - -// UpdateACL: update an ACL. -// Update a particular ACL, specified by its ACL ID. You can update details including its name, action and match rule. +// UpdateACL: Update a particular ACL, specified by its ACL ID. You can update details including its name, action and match rule. func (s *ZonedAPI) UpdateACL(req *ZonedAPIUpdateACLRequest, opts ...scw.RequestOption) (*ACL, error) { var err error @@ -3669,9 +5537,8 @@ func (s *ZonedAPI) UpdateACL(req *ZonedAPIUpdateACLRequest, opts ...scw.RequestO } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/acls/" + fmt.Sprint(req.ACLID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/acls/" + fmt.Sprint(req.ACLID) + "", } err = scwReq.SetBody(req) @@ -3688,15 +5555,7 @@ func (s *ZonedAPI) UpdateACL(req *ZonedAPIUpdateACLRequest, opts ...scw.RequestO return &resp, nil } -type ZonedAPIDeleteACLRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // ACLID: ACL ID. - ACLID string `json:"-"` -} - -// DeleteACL: delete an ACL. -// Delete an ACL, specified by its ACL ID. Deleting an ACL is irreversible and cannot be undone. +// DeleteACL: Delete an ACL, specified by its ACL ID. Deleting an ACL is irreversible and cannot be undone. func (s *ZonedAPI) DeleteACL(req *ZonedAPIDeleteACLRequest, opts ...scw.RequestOption) error { var err error @@ -3714,9 +5573,8 @@ func (s *ZonedAPI) DeleteACL(req *ZonedAPIDeleteACLRequest, opts ...scw.RequestO } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/acls/" + fmt.Sprint(req.ACLID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/acls/" + fmt.Sprint(req.ACLID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -3726,17 +5584,7 @@ func (s *ZonedAPI) DeleteACL(req *ZonedAPIDeleteACLRequest, opts ...scw.RequestO return nil } -type ZonedAPISetACLsRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // FrontendID: frontend ID. 
- FrontendID string `json:"-"` - // ACLs: list of ACLs for this frontend. Any other existing ACLs on this frontend will be removed. - ACLs []*ACLSpec `json:"acls"` -} - -// SetACLs: define all ACLs for a given frontend. -// For a given frontend specified by its frontend ID, define and add the complete set of ACLS for that frontend. Any existing ACLs on this frontend will be removed. +// SetACLs: For a given frontend specified by its frontend ID, define and add the complete set of ACLS for that frontend. Any existing ACLs on this frontend will be removed. func (s *ZonedAPI) SetACLs(req *ZonedAPISetACLsRequest, opts ...scw.RequestOption) (*SetACLsResponse, error) { var err error @@ -3754,9 +5602,8 @@ func (s *ZonedAPI) SetACLs(req *ZonedAPISetACLsRequest, opts ...scw.RequestOptio } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", } err = scwReq.SetBody(req) @@ -3773,23 +5620,7 @@ func (s *ZonedAPI) SetACLs(req *ZonedAPISetACLsRequest, opts ...scw.RequestOptio return &resp, nil } -type ZonedAPICreateCertificateRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: name for the certificate. - Name string `json:"name"` - // Letsencrypt: object to define a new Let's Encrypt certificate to be generated. - // Precisely one of CustomCertificate, Letsencrypt must be set. - Letsencrypt *CreateCertificateRequestLetsencryptConfig `json:"letsencrypt,omitempty"` - // CustomCertificate: object to define an existing custom certificate to be imported. - // Precisely one of CustomCertificate, Letsencrypt must be set. - CustomCertificate *CreateCertificateRequestCustomCertificate `json:"custom_certificate,omitempty"` -} - -// CreateCertificate: create an SSL/TLS certificate. -// Generate a new SSL/TLS certificate for a given Load Balancer. You can choose to create a Let's Encrypt certificate, or import a custom certificate. +// CreateCertificate: Generate a new SSL/TLS certificate for a given Load Balancer. You can choose to create a Let's Encrypt certificate, or import a custom certificate. func (s *ZonedAPI) CreateCertificate(req *ZonedAPICreateCertificateRequest, opts ...scw.RequestOption) (*Certificate, error) { var err error @@ -3811,9 +5642,8 @@ func (s *ZonedAPI) CreateCertificate(req *ZonedAPICreateCertificateRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/certificates", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/certificates", } err = scwReq.SetBody(req) @@ -3830,24 +5660,7 @@ func (s *ZonedAPI) CreateCertificate(req *ZonedAPICreateCertificateRequest, opts return &resp, nil } -type ZonedAPIListCertificatesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // OrderBy: sort order of certificates in the response. - // Default value: created_at_asc - OrderBy ListCertificatesRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. 
- Page *int32 `json:"-"` - // PageSize: number of certificates to return. - PageSize *uint32 `json:"-"` - // Name: certificate name to filter for, only certificates of this name will be returned. - Name *string `json:"-"` -} - -// ListCertificates: list all SSL/TLS certificates on a given Load Balancer. -// List all the SSL/TLS certificates on a given Load Balancer. The response is an array of certificate objects, which are by default listed in ascending order of creation date. +// ListCertificates: List all the SSL/TLS certificates on a given Load Balancer. The response is an array of certificate objects, which are by default listed in ascending order of creation date. func (s *ZonedAPI) ListCertificates(req *ZonedAPIListCertificatesRequest, opts ...scw.RequestOption) (*ListCertificatesResponse, error) { var err error @@ -3876,10 +5689,9 @@ func (s *ZonedAPI) ListCertificates(req *ZonedAPIListCertificatesRequest, opts . } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/certificates", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/certificates", + Query: query, } var resp ListCertificatesResponse @@ -3891,15 +5703,7 @@ func (s *ZonedAPI) ListCertificates(req *ZonedAPIListCertificatesRequest, opts . return &resp, nil } -type ZonedAPIGetCertificateRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // CertificateID: certificate ID. - CertificateID string `json:"-"` -} - -// GetCertificate: get an SSL/TLS certificate. -// Get information for a particular SSL/TLS certificate, specified by its certificate ID. The response returns full details of the certificate, including its type, main domain name, and alternative domain names. +// GetCertificate: Get information for a particular SSL/TLS certificate, specified by its certificate ID. The response returns full details of the certificate, including its type, main domain name, and alternative domain names. func (s *ZonedAPI) GetCertificate(req *ZonedAPIGetCertificateRequest, opts ...scw.RequestOption) (*Certificate, error) { var err error @@ -3917,9 +5721,8 @@ func (s *ZonedAPI) GetCertificate(req *ZonedAPIGetCertificateRequest, opts ...sc } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", } var resp Certificate @@ -3931,17 +5734,7 @@ func (s *ZonedAPI) GetCertificate(req *ZonedAPIGetCertificateRequest, opts ...sc return &resp, nil } -type ZonedAPIUpdateCertificateRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // CertificateID: certificate ID. - CertificateID string `json:"-"` - // Name: certificate name. - Name string `json:"name"` -} - -// UpdateCertificate: update an SSL/TLS certificate. -// Update the name of a particular SSL/TLS certificate, specified by its certificate ID. +// UpdateCertificate: Update the name of a particular SSL/TLS certificate, specified by its certificate ID. 
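// A minimal usage sketch of the UpdateCertificate call defined just below (assumptions:
// a pre-configured *lb.ZonedAPI client; the certificate ID and new name are placeholders,
// not values from this diff). Leaving Zone empty lets the SDK fall back to the client's
// default zone, exactly as the generated guard clauses in these hunks do.
package lbexamples

import (
	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
)

func renameCertificate(api *lb.ZonedAPI, certificateID string) (*lb.Certificate, error) {
	return api.UpdateCertificate(&lb.ZonedAPIUpdateCertificateRequest{
		CertificateID: certificateID,  // certificate to rename
		Name:          "renamed-cert", // new certificate name (placeholder)
		// Zone omitted: the default zone from the client config is used.
	})
}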
func (s *ZonedAPI) UpdateCertificate(req *ZonedAPIUpdateCertificateRequest, opts ...scw.RequestOption) (*Certificate, error) { var err error @@ -3959,9 +5752,8 @@ func (s *ZonedAPI) UpdateCertificate(req *ZonedAPIUpdateCertificateRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", } err = scwReq.SetBody(req) @@ -3978,15 +5770,7 @@ func (s *ZonedAPI) UpdateCertificate(req *ZonedAPIUpdateCertificateRequest, opts return &resp, nil } -type ZonedAPIDeleteCertificateRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // CertificateID: certificate ID. - CertificateID string `json:"-"` -} - -// DeleteCertificate: delete an SSL/TLS certificate. -// Delete an SSL/TLS certificate, specified by its certificate ID. Deleting a certificate is irreversible and cannot be undone. +// DeleteCertificate: Delete an SSL/TLS certificate, specified by its certificate ID. Deleting a certificate is irreversible and cannot be undone. func (s *ZonedAPI) DeleteCertificate(req *ZonedAPIDeleteCertificateRequest, opts ...scw.RequestOption) error { var err error @@ -4004,9 +5788,8 @@ func (s *ZonedAPI) DeleteCertificate(req *ZonedAPIDeleteCertificateRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -4016,17 +5799,7 @@ func (s *ZonedAPI) DeleteCertificate(req *ZonedAPIDeleteCertificateRequest, opts return nil } -type ZonedAPIListLBTypesRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: the number of items to return. - PageSize *uint32 `json:"-"` -} - -// ListLBTypes: list all Load Balancer offer types. -// List all the different commercial Load Balancer types. The response includes an array of offer types, each with a name, description, and information about its stock availability. +// ListLBTypes: List all the different commercial Load Balancer types. The response includes an array of offer types, each with a name, description, and information about its stock availability. func (s *ZonedAPI) ListLBTypes(req *ZonedAPIListLBTypesRequest, opts ...scw.RequestOption) (*ListLBTypesResponse, error) { var err error @@ -4049,10 +5822,9 @@ func (s *ZonedAPI) ListLBTypes(req *ZonedAPIListLBTypesRequest, opts ...scw.Requ } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lb-types", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lb-types", + Query: query, } var resp ListLBTypesResponse @@ -4064,53 +5836,32 @@ func (s *ZonedAPI) ListLBTypes(req *ZonedAPIListLBTypesRequest, opts ...scw.Requ return &resp, nil } -type ZonedAPICreateSubscriberRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // Name: subscriber name. 
- Name string `json:"name"` - // EmailConfig: email address configuration. - // Precisely one of EmailConfig, WebhookConfig must be set. - EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` - // WebhookConfig: webHook URI configuration. - // Precisely one of EmailConfig, WebhookConfig must be set. - WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` - // Deprecated: OrganizationID: organization ID to create the subscriber in. - // Precisely one of OrganizationID, ProjectID must be set. - OrganizationID *string `json:"organization_id,omitempty"` - // ProjectID: project ID to create the subscriber in. - // Precisely one of OrganizationID, ProjectID must be set. - ProjectID *string `json:"project_id,omitempty"` -} - -// CreateSubscriber: create a subscriber. -// Create a new subscriber, either with an email configuration or a webhook configuration, for a specified Scaleway Project. +// CreateSubscriber: Create a new subscriber, either with an email configuration or a webhook configuration, for a specified Scaleway Project. func (s *ZonedAPI) CreateSubscriber(req *ZonedAPICreateSubscriberRequest, opts ...scw.RequestOption) (*Subscriber, error) { var err error - defaultProjectID, exist := s.client.GetDefaultProjectID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.ProjectID = &defaultProjectID - } - - defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.OrganizationID = &defaultOrganizationID - } - if req.Zone == "" { defaultZone, _ := s.client.GetDefaultZone() req.Zone = defaultZone } + defaultProjectID, exist := s.client.GetDefaultProjectID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.ProjectID = &defaultProjectID + } + + defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.OrganizationID = &defaultOrganizationID + } + if fmt.Sprint(req.Zone) == "" { return nil, errors.New("field Zone cannot be empty in request") } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/subscribers", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/subscribers", } err = scwReq.SetBody(req) @@ -4127,15 +5878,7 @@ func (s *ZonedAPI) CreateSubscriber(req *ZonedAPICreateSubscriberRequest, opts . return &resp, nil } -type ZonedAPIGetSubscriberRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SubscriberID: subscriber ID. - SubscriberID string `json:"-"` -} - -// GetSubscriber: get a subscriber. -// Retrieve information about an existing subscriber, specified by its subscriber ID. Its full details, including name and email/webhook configuration, are returned in the response object. +// GetSubscriber: Retrieve information about an existing subscriber, specified by its subscriber ID. Its full details, including name and email/webhook configuration, are returned in the response object. func (s *ZonedAPI) GetSubscriber(req *ZonedAPIGetSubscriberRequest, opts ...scw.RequestOption) (*Subscriber, error) { var err error @@ -4153,9 +5896,8 @@ func (s *ZonedAPI) GetSubscriber(req *ZonedAPIGetSubscriberRequest, opts ...scw. 
} scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/subscribers/" + fmt.Sprint(req.SubscriberID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/subscribers/" + fmt.Sprint(req.SubscriberID) + "", } var resp Subscriber @@ -4167,26 +5909,7 @@ func (s *ZonedAPI) GetSubscriber(req *ZonedAPIGetSubscriberRequest, opts ...scw. return &resp, nil } -type ZonedAPIListSubscriberRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // OrderBy: sort order of subscribers in the response. - // Default value: created_at_asc - OrderBy ListSubscriberRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: the number of items to return. - PageSize *uint32 `json:"-"` - // Name: subscriber name to search for. - Name *string `json:"-"` - // OrganizationID: filter subscribers by Organization ID. - OrganizationID *string `json:"-"` - // ProjectID: filter subscribers by Project ID. - ProjectID *string `json:"-"` -} - -// ListSubscriber: list all subscribers. -// List all subscribers to Load Balancer alerts. By default, returns all subscribers to Load Balancer alerts for the Organization associated with the authentication token used for the request. +// ListSubscriber: List all subscribers to Load Balancer alerts. By default, returns all subscribers to Load Balancer alerts for the Organization associated with the authentication token used for the request. func (s *ZonedAPI) ListSubscriber(req *ZonedAPIListSubscriberRequest, opts ...scw.RequestOption) (*ListSubscriberResponse, error) { var err error @@ -4213,10 +5936,9 @@ func (s *ZonedAPI) ListSubscriber(req *ZonedAPIListSubscriberRequest, opts ...sc } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/subscribers", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/subscribers", + Query: query, } var resp ListSubscriberResponse @@ -4228,23 +5950,7 @@ func (s *ZonedAPI) ListSubscriber(req *ZonedAPIListSubscriberRequest, opts ...sc return &resp, nil } -type ZonedAPIUpdateSubscriberRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SubscriberID: subscriber ID. - SubscriberID string `json:"-"` - // Name: subscriber name. - Name string `json:"name"` - // EmailConfig: email address configuration. - // Precisely one of EmailConfig, WebhookConfig must be set. - EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` - // WebhookConfig: webhook URI configuration. - // Precisely one of EmailConfig, WebhookConfig must be set. - WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` -} - -// UpdateSubscriber: update a subscriber. -// Update the parameters of a given subscriber (e.g. name, webhook configuration, email configuration), specified by its subscriber ID. +// UpdateSubscriber: Update the parameters of a given subscriber (e.g. name, webhook configuration, email configuration), specified by its subscriber ID. func (s *ZonedAPI) UpdateSubscriber(req *ZonedAPIUpdateSubscriberRequest, opts ...scw.RequestOption) (*Subscriber, error) { var err error @@ -4262,9 +5968,8 @@ func (s *ZonedAPI) UpdateSubscriber(req *ZonedAPIUpdateSubscriberRequest, opts . 
} scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/subscribers/" + fmt.Sprint(req.SubscriberID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/subscribers/" + fmt.Sprint(req.SubscriberID) + "", } err = scwReq.SetBody(req) @@ -4281,15 +5986,7 @@ func (s *ZonedAPI) UpdateSubscriber(req *ZonedAPIUpdateSubscriberRequest, opts . return &resp, nil } -type ZonedAPIDeleteSubscriberRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // SubscriberID: subscriber ID. - SubscriberID string `json:"-"` -} - -// DeleteSubscriber: delete a subscriber. -// Delete an existing subscriber, specified by its subscriber ID. Deleting a subscriber is permanent, and cannot be undone. +// DeleteSubscriber: Delete an existing subscriber, specified by its subscriber ID. Deleting a subscriber is permanent, and cannot be undone. func (s *ZonedAPI) DeleteSubscriber(req *ZonedAPIDeleteSubscriberRequest, opts ...scw.RequestOption) error { var err error @@ -4307,9 +6004,8 @@ func (s *ZonedAPI) DeleteSubscriber(req *ZonedAPIDeleteSubscriberRequest, opts . } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lb/subscription/" + fmt.Sprint(req.SubscriberID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lb/subscription/" + fmt.Sprint(req.SubscriberID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -4319,17 +6015,7 @@ func (s *ZonedAPI) DeleteSubscriber(req *ZonedAPIDeleteSubscriberRequest, opts . return nil } -type ZonedAPISubscribeToLBRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // SubscriberID: subscriber ID. - SubscriberID string `json:"subscriber_id"` -} - -// SubscribeToLB: subscribe a subscriber to alerts for a given Load Balancer. -// Subscribe an existing subscriber to alerts for a given Load Balancer. +// SubscribeToLB: Subscribe an existing subscriber to alerts for a given Load Balancer. func (s *ZonedAPI) SubscribeToLB(req *ZonedAPISubscribeToLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -4347,9 +6033,8 @@ func (s *ZonedAPI) SubscribeToLB(req *ZonedAPISubscribeToLBRequest, opts ...scw. } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lb/" + fmt.Sprint(req.LBID) + "/subscribe", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lb/" + fmt.Sprint(req.LBID) + "/subscribe", } err = scwReq.SetBody(req) @@ -4366,15 +6051,7 @@ func (s *ZonedAPI) SubscribeToLB(req *ZonedAPISubscribeToLBRequest, opts ...scw. return &resp, nil } -type ZonedAPIUnsubscribeFromLBRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` -} - -// UnsubscribeFromLB: unsubscribe a subscriber from alerts for a given Load Balancer. -// Unsubscribe a subscriber from alerts for a given Load Balancer. The subscriber is not deleted, and can be resubscribed in the future if necessary. +// UnsubscribeFromLB: Unsubscribe a subscriber from alerts for a given Load Balancer. The subscriber is not deleted, and can be resubscribed in the future if necessary. 
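// A small sketch of the alert-subscription round trip shown above and just below
// (assumptions: a configured *lb.ZonedAPI and existing Load Balancer / subscriber IDs;
// the IDs are placeholders). SubscribeToLB attaches an existing subscriber to a Load
// Balancer's alerts; UnsubscribeFromLB detaches it without deleting the subscriber.
package lbexamples

import (
	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
)

func toggleAlerts(api *lb.ZonedAPI, lbID, subscriberID string) error {
	// Subscribe the existing subscriber to alerts for this Load Balancer.
	if _, err := api.SubscribeToLB(&lb.ZonedAPISubscribeToLBRequest{
		LBID:         lbID,
		SubscriberID: subscriberID,
	}); err != nil {
		return err
	}

	// Later, detach it again; the subscriber remains and can be re-subscribed.
	_, err := api.UnsubscribeFromLB(&lb.ZonedAPIUnsubscribeFromLBRequest{
		LBID: lbID,
	})
	return err
}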
func (s *ZonedAPI) UnsubscribeFromLB(req *ZonedAPIUnsubscribeFromLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -4392,9 +6069,8 @@ func (s *ZonedAPI) UnsubscribeFromLB(req *ZonedAPIUnsubscribeFromLBRequest, opts } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lb/" + fmt.Sprint(req.LBID) + "/unsubscribe", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lb/" + fmt.Sprint(req.LBID) + "/unsubscribe", } var resp LB @@ -4406,22 +6082,7 @@ func (s *ZonedAPI) UnsubscribeFromLB(req *ZonedAPIUnsubscribeFromLBRequest, opts return &resp, nil } -type ZonedAPIListLBPrivateNetworksRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // OrderBy: sort order of Private Network objects in the response. - // Default value: created_at_asc - OrderBy ListPrivateNetworksRequestOrderBy `json:"-"` - // PageSize: number of objects to return. - PageSize *uint32 `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` -} - -// ListLBPrivateNetworks: list Private Networks attached to a Load Balancer. -// List the Private Networks attached to a given Load Balancer, specified by its Load Balancer ID. The response is an array of Private Network objects, giving information including the status, configuration, name and creation date of each Private Network. +// ListLBPrivateNetworks: List the Private Networks attached to a given Load Balancer, specified by its Load Balancer ID. The response is an array of Private Network objects, giving information including the status, configuration, name and creation date of each Private Network. func (s *ZonedAPI) ListLBPrivateNetworks(req *ZonedAPIListLBPrivateNetworksRequest, opts ...scw.RequestOption) (*ListLBPrivateNetworksResponse, error) { var err error @@ -4449,10 +6110,9 @@ func (s *ZonedAPI) ListLBPrivateNetworks(req *ZonedAPIListLBPrivateNetworksReque } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks", + Query: query, } var resp ListLBPrivateNetworksResponse @@ -4464,26 +6124,7 @@ func (s *ZonedAPI) ListLBPrivateNetworks(req *ZonedAPIListLBPrivateNetworksReque return &resp, nil } -type ZonedAPIAttachPrivateNetworkRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // PrivateNetworkID: private Network ID. - PrivateNetworkID string `json:"-"` - // Deprecated: StaticConfig: object containing an array of a local IP address for the Load Balancer on this Private Network. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. - StaticConfig *PrivateNetworkStaticConfig `json:"static_config,omitempty"` - // DHCPConfig: defines whether to let DHCP assign IP addresses. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. - DHCPConfig *PrivateNetworkDHCPConfig `json:"dhcp_config,omitempty"` - // Deprecated: IpamConfig: for internal use only. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. 
- IpamConfig *PrivateNetworkIpamConfig `json:"ipam_config,omitempty"` -} - -// AttachPrivateNetwork: attach a Load Balancer to a Private Network. -// Attach a specified Load Balancer to a specified Private Network, defining a static or DHCP configuration for the Load Balancer on the network. +// AttachPrivateNetwork: Attach a specified Load Balancer to a specified Private Network, defining a static or DHCP configuration for the Load Balancer on the network. func (s *ZonedAPI) AttachPrivateNetwork(req *ZonedAPIAttachPrivateNetworkRequest, opts ...scw.RequestOption) (*PrivateNetwork, error) { var err error @@ -4505,9 +6146,8 @@ func (s *ZonedAPI) AttachPrivateNetwork(req *ZonedAPIAttachPrivateNetworkRequest } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks/" + fmt.Sprint(req.PrivateNetworkID) + "/attach", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks/" + fmt.Sprint(req.PrivateNetworkID) + "/attach", } err = scwReq.SetBody(req) @@ -4524,17 +6164,7 @@ func (s *ZonedAPI) AttachPrivateNetwork(req *ZonedAPIAttachPrivateNetworkRequest return &resp, nil } -type ZonedAPIDetachPrivateNetworkRequest struct { - // Zone: zone to target. If none is passed will use default zone from the config. - Zone scw.Zone `json:"-"` - // LBID: load balancer ID. - LBID string `json:"-"` - // PrivateNetworkID: set your instance private network id. - PrivateNetworkID string `json:"-"` -} - -// DetachPrivateNetwork: detach Load Balancer from Private Network. -// Detach a specified Load Balancer from a specified Private Network. +// DetachPrivateNetwork: Detach a specified Load Balancer from a specified Private Network. func (s *ZonedAPI) DetachPrivateNetwork(req *ZonedAPIDetachPrivateNetworkRequest, opts ...scw.RequestOption) error { var err error @@ -4556,9 +6186,8 @@ func (s *ZonedAPI) DetachPrivateNetwork(req *ZonedAPIDetachPrivateNetworkRequest } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks/" + fmt.Sprint(req.PrivateNetworkID) + "/detach", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/zones/" + fmt.Sprint(req.Zone) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks/" + fmt.Sprint(req.PrivateNetworkID) + "/detach", } err = scwReq.SetBody(req) @@ -4573,32 +6202,22 @@ func (s *ZonedAPI) DetachPrivateNetwork(req *ZonedAPIDetachPrivateNetworkRequest return nil } -// Service API +// This API allows you to manage your load balancer service. +type API struct { + client *scw.Client +} -// Regions list localities the api is available in +// Deprecated: NewAPI returns a API object from a Scaleway client. +func NewAPI(client *scw.Client) *API { + return &API{ + client: client, + } +} func (s *API) Regions() []scw.Region { return []scw.Region{scw.RegionFrPar, scw.RegionNlAms, scw.RegionPlWaw} } -type ListLBsRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // Name: load Balancer name to filter for. - Name *string `json:"-"` - // OrderBy: sort order of Load Balancers in the response. - // Default value: created_at_asc - OrderBy ListLBsRequestOrderBy `json:"-"` - // PageSize: number of Load Balancers to return. - PageSize *uint32 `json:"-"` - // Page: page number to return, from the paginated results. 
- Page *int32 `json:"-"` - // OrganizationID: organization ID to filter for, only Load Balancers from this Organization will be returned. - OrganizationID *string `json:"-"` - // ProjectID: project ID to filter for, only Load Balancers from this Project will be returned. - ProjectID *string `json:"-"` -} - -// ListLBs: list load balancers. +// ListLBs: List load balancers. func (s *API) ListLBs(req *ListLBsRequest, opts ...scw.RequestOption) (*ListLBsResponse, error) { var err error @@ -4625,10 +6244,9 @@ func (s *API) ListLBs(req *ListLBsRequest, opts ...scw.RequestOption) (*ListLBsR } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs", + Query: query, } var resp ListLBsResponse @@ -4640,51 +6258,25 @@ func (s *API) ListLBs(req *ListLBsRequest, opts ...scw.RequestOption) (*ListLBsR return &resp, nil } -type CreateLBRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // Deprecated: OrganizationID: scaleway Organization to create the Load Balancer in. - // Precisely one of OrganizationID, ProjectID must be set. - OrganizationID *string `json:"organization_id,omitempty"` - // ProjectID: scaleway Project to create the Load Balancer in. - // Precisely one of OrganizationID, ProjectID must be set. - ProjectID *string `json:"project_id,omitempty"` - // Name: name for the Load Balancer. - Name string `json:"name"` - // Description: description for the Load Balancer. - Description string `json:"description"` - // Deprecated: IPID: ID of an existing flexible IP address to attach to the Load Balancer. - IPID *string `json:"ip_id,omitempty"` - // AssignFlexibleIP: defines whether to automatically assign a flexible public IP to lb. Default value is `false` (do not assign). - AssignFlexibleIP *bool `json:"assign_flexible_ip"` - // Tags: list of tags for the Load Balancer. - Tags []string `json:"tags"` - // Type: load Balancer commercial offer type. Use the Load Balancer types endpoint to retrieve a list of available offer types. - Type string `json:"type"` - // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and do not need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort. - // Default value: ssl_compatibility_level_unknown - SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` -} - -// CreateLB: create a load balancer. +// CreateLB: Create a load balancer. 
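// A sketch of creating a Load Balancer through the regional API defined just below
// (assumptions: a configured *lb.API client; the name, tags and the "LB-S" offer type
// are placeholders, so check ListLBTypes for the types actually available). Region and
// ProjectID are omitted: the generated code falls back to the client defaults.
package lbexamples

import (
	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
)

func createLB(api *lb.API) (*lb.LB, error) {
	return api.CreateLB(&lb.CreateLBRequest{
		Name:        "demo-lb",
		Description: "created from the usage sketch",
		Type:        "LB-S", // commercial offer type (placeholder)
		Tags:        []string{"demo"},
		// Region and ProjectID omitted: client defaults apply.
	})
}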
func (s *API) CreateLB(req *CreateLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error - defaultProjectID, exist := s.client.GetDefaultProjectID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.ProjectID = &defaultProjectID - } - - defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.OrganizationID = &defaultOrganizationID - } - if req.Region == "" { defaultRegion, _ := s.client.GetDefaultRegion() req.Region = defaultRegion } + defaultProjectID, exist := s.client.GetDefaultProjectID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.ProjectID = &defaultProjectID + } + + defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.OrganizationID = &defaultOrganizationID + } + if req.Name == "" { req.Name = namegenerator.GetRandomName("lb") } @@ -4694,9 +6286,8 @@ func (s *API) CreateLB(req *CreateLBRequest, opts ...scw.RequestOption) (*LB, er } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs", } err = scwReq.SetBody(req) @@ -4713,14 +6304,7 @@ func (s *API) CreateLB(req *CreateLBRequest, opts ...scw.RequestOption) (*LB, er return &resp, nil } -type GetLBRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` -} - -// GetLB: get a load balancer. +// GetLB: Get a load balancer. func (s *API) GetLB(req *GetLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -4738,9 +6322,8 @@ func (s *API) GetLB(req *GetLBRequest, opts ...scw.RequestOption) (*LB, error) { } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "", } var resp LB @@ -4752,23 +6335,7 @@ func (s *API) GetLB(req *GetLBRequest, opts ...scw.RequestOption) (*LB, error) { return &resp, nil } -type UpdateLBRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: load Balancer name. - Name string `json:"name"` - // Description: load Balancer description. - Description string `json:"description"` - // Tags: list of tags for the Load Balancer. - Tags []string `json:"tags"` - // SslCompatibilityLevel: determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and don't need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort. - // Default value: ssl_compatibility_level_unknown - SslCompatibilityLevel SSLCompatibilityLevel `json:"ssl_compatibility_level"` -} - -// UpdateLB: update a load balancer. +// UpdateLB: Update a load balancer. 
func (s *API) UpdateLB(req *UpdateLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -4786,9 +6353,8 @@ func (s *API) UpdateLB(req *UpdateLBRequest, opts ...scw.RequestOption) (*LB, er } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "", } err = scwReq.SetBody(req) @@ -4805,16 +6371,7 @@ func (s *API) UpdateLB(req *UpdateLBRequest, opts ...scw.RequestOption) (*LB, er return &resp, nil } -type DeleteLBRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: ID of the Load Balancer to delete. - LBID string `json:"-"` - // ReleaseIP: defines whether the Load Balancer's flexible IP should be deleted. Set to true to release the flexible IP, or false to keep it available in your account for future Load Balancers. - ReleaseIP bool `json:"-"` -} - -// DeleteLB: delete a load balancer. +// DeleteLB: Delete a load balancer. func (s *API) DeleteLB(req *DeleteLBRequest, opts ...scw.RequestOption) error { var err error @@ -4835,10 +6392,9 @@ func (s *API) DeleteLB(req *DeleteLBRequest, opts ...scw.RequestOption) error { } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "", - Query: query, - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "", + Query: query, } err = s.client.Do(scwReq, nil, opts...) @@ -4848,16 +6404,7 @@ func (s *API) DeleteLB(req *DeleteLBRequest, opts ...scw.RequestOption) error { return nil } -type MigrateLBRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Type: load Balancer type to migrate to (use the List all Load Balancer offer types endpoint to get a list of available offer types). - Type string `json:"type"` -} - -// MigrateLB: migrate a load balancer. +// MigrateLB: Migrate a load balancer. func (s *API) MigrateLB(req *MigrateLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -4875,9 +6422,8 @@ func (s *API) MigrateLB(req *MigrateLBRequest, opts ...scw.RequestOption) (*LB, } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/migrate", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/migrate", } err = scwReq.SetBody(req) @@ -4894,22 +6440,7 @@ func (s *API) MigrateLB(req *MigrateLBRequest, opts ...scw.RequestOption) (*LB, return &resp, nil } -type ListIPsRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of IP addresses to return. - PageSize *uint32 `json:"-"` - // IPAddress: IP address to filter for. - IPAddress *string `json:"-"` - // OrganizationID: organization ID to filter for, only Load Balancer IP addresses from this Organization will be returned. 
- OrganizationID *string `json:"-"` - // ProjectID: project ID to filter for, only Load Balancer IP addresses from this Project will be returned. - ProjectID *string `json:"-"` -} - -// ListIPs: list IPs. +// ListIPs: List IPs. func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsResponse, error) { var err error @@ -4935,10 +6466,9 @@ func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsR } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips", + Query: query, } var resp ListIPsResponse @@ -4950,46 +6480,32 @@ func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsR return &resp, nil } -type CreateIPRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // Deprecated: OrganizationID: organization ID of the Organization where the IP address should be created. - // Precisely one of OrganizationID, ProjectID must be set. - OrganizationID *string `json:"organization_id,omitempty"` - // ProjectID: project ID of the Project where the IP address should be created. - // Precisely one of OrganizationID, ProjectID must be set. - ProjectID *string `json:"project_id,omitempty"` - // Reverse: reverse DNS (domain name) for the IP address. - Reverse *string `json:"reverse"` -} - -// CreateIP: create an IP. +// CreateIP: Create an IP. func (s *API) CreateIP(req *CreateIPRequest, opts ...scw.RequestOption) (*IP, error) { var err error - defaultProjectID, exist := s.client.GetDefaultProjectID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.ProjectID = &defaultProjectID - } - - defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.OrganizationID = &defaultOrganizationID - } - if req.Region == "" { defaultRegion, _ := s.client.GetDefaultRegion() req.Region = defaultRegion } + defaultProjectID, exist := s.client.GetDefaultProjectID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.ProjectID = &defaultProjectID + } + + defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.OrganizationID = &defaultOrganizationID + } + if fmt.Sprint(req.Region) == "" { return nil, errors.New("field Region cannot be empty in request") } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips", } err = scwReq.SetBody(req) @@ -5006,14 +6522,7 @@ func (s *API) CreateIP(req *CreateIPRequest, opts ...scw.RequestOption) (*IP, er return &resp, nil } -type GetIPRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // IPID: IP address ID. - IPID string `json:"-"` -} - -// GetIP: get an IP. +// GetIP: Get an IP. 
func (s *API) GetIP(req *GetIPRequest, opts ...scw.RequestOption) (*IP, error) { var err error @@ -5031,9 +6540,8 @@ func (s *API) GetIP(req *GetIPRequest, opts ...scw.RequestOption) (*IP, error) { } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips/" + fmt.Sprint(req.IPID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips/" + fmt.Sprint(req.IPID) + "", } var resp IP @@ -5045,14 +6553,7 @@ func (s *API) GetIP(req *GetIPRequest, opts ...scw.RequestOption) (*IP, error) { return &resp, nil } -type ReleaseIPRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // IPID: IP address ID. - IPID string `json:"-"` -} - -// ReleaseIP: delete an IP. +// ReleaseIP: Delete an IP. func (s *API) ReleaseIP(req *ReleaseIPRequest, opts ...scw.RequestOption) error { var err error @@ -5070,9 +6571,8 @@ func (s *API) ReleaseIP(req *ReleaseIPRequest, opts ...scw.RequestOption) error } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips/" + fmt.Sprint(req.IPID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips/" + fmt.Sprint(req.IPID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -5082,16 +6582,7 @@ func (s *API) ReleaseIP(req *ReleaseIPRequest, opts ...scw.RequestOption) error return nil } -type UpdateIPRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // IPID: IP address ID. - IPID string `json:"-"` - // Reverse: reverse DNS (domain name) for the IP address. - Reverse *string `json:"reverse"` -} - -// UpdateIP: update an IP. +// UpdateIP: Update an IP. func (s *API) UpdateIP(req *UpdateIPRequest, opts ...scw.RequestOption) (*IP, error) { var err error @@ -5109,9 +6600,8 @@ func (s *API) UpdateIP(req *UpdateIPRequest, opts ...scw.RequestOption) (*IP, er } scwReq := &scw.ScalewayRequest{ - Method: "PATCH", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips/" + fmt.Sprint(req.IPID) + "", - Headers: http.Header{}, + Method: "PATCH", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/ips/" + fmt.Sprint(req.IPID) + "", } err = scwReq.SetBody(req) @@ -5128,23 +6618,7 @@ func (s *API) UpdateIP(req *UpdateIPRequest, opts ...scw.RequestOption) (*IP, er return &resp, nil } -type ListBackendsRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: name of the backend to filter for. - Name *string `json:"-"` - // OrderBy: sort order of backends in the response. - // Default value: created_at_asc - OrderBy ListBackendsRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of backends to return. - PageSize *uint32 `json:"-"` -} - -// ListBackends: list backends in a given load balancer. +// ListBackends: List backends in a given load balancer. 
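// A sketch of listing a Load Balancer's backends with the call defined just below
// (assumptions: a configured *lb.API and an existing Load Balancer ID; the name filter
// is a placeholder). Optional paging and ordering fields are left nil so the API
// defaults apply; scw.StringPtr is the SDK's pointer helper.
package lbexamples

import (
	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func listWebBackends(api *lb.API, lbID string) (*lb.ListBackendsResponse, error) {
	return api.ListBackends(&lb.ListBackendsRequest{
		LBID: lbID,
		Name: scw.StringPtr("web"), // only backends whose name matches this filter
		// Region, OrderBy, Page and PageSize omitted: defaults are used.
	})
}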
func (s *API) ListBackends(req *ListBackendsRequest, opts ...scw.RequestOption) (*ListBackendsResponse, error) { var err error @@ -5173,10 +6647,9 @@ func (s *API) ListBackends(req *ListBackendsRequest, opts ...scw.RequestOption) } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/backends", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/backends", + Query: query, } var resp ListBackendsResponse @@ -5188,104 +6661,7 @@ func (s *API) ListBackends(req *ListBackendsRequest, opts ...scw.RequestOption) return &resp, nil } -type CreateBackendRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: name for the backend. - Name string `json:"name"` - // ForwardProtocol: protocol to be used by the backend when forwarding traffic to backend servers. - // Default value: tcp - ForwardProtocol Protocol `json:"forward_protocol"` - // ForwardPort: port to be used by the backend when forwarding traffic to backend servers. - ForwardPort int32 `json:"forward_port"` - // ForwardPortAlgorithm: load balancing algorithm to be used when determining which backend server to forward new traffic to. - // Default value: roundrobin - ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` - // StickySessions: defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie TO stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. - // Default value: none - StickySessions StickySessionsType `json:"sticky_sessions"` - // StickySessionsCookieName: cookie name for cookie-based sticky sessions. - StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` - // HealthCheck: object defining the health check to be carried out by the backend when checking the status and health of backend servers. - HealthCheck *HealthCheck `json:"health_check"` - // ServerIP: list of backend server IP addresses (IPv4 or IPv6) the backend should forward traffic to. - ServerIP []string `json:"server_ip"` - // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. - SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` - // TimeoutServer: maximum allowed time for a backend server to process a request. - // Default value: 300000 - TimeoutServer *time.Duration `json:"timeout_server"` - // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. - // Default value: 5000 - TimeoutConnect *time.Duration `json:"timeout_connect"` - // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). - // Default value: 900000 - TimeoutTunnel *time.Duration `json:"timeout_tunnel"` - // OnMarkedDownAction: action to take when a backend server is marked as down. - // Default value: on_marked_down_action_none - OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` - // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. 
The PROXY protocol must be supported by the backend servers' software. - // Default value: proxy_protocol_unknown - ProxyProtocol ProxyProtocol `json:"proxy_protocol"` - // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. - FailoverHost *string `json:"failover_host"` - // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. - SslBridging *bool `json:"ssl_bridging"` - // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. - IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify"` - // RedispatchAttemptCount: whether to use another backend server on each attempt. - RedispatchAttemptCount *int32 `json:"redispatch_attempt_count"` - // MaxRetries: number of retries when a backend server connection failed. - MaxRetries *int32 `json:"max_retries"` - // MaxConnections: maximum number of connections allowed per backend server. - MaxConnections *int32 `json:"max_connections"` - // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. - TimeoutQueue *scw.Duration `json:"timeout_queue"` -} - -func (m *CreateBackendRequest) UnmarshalJSON(b []byte) error { - type tmpType CreateBackendRequest - tmp := struct { - tmpType - - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` - TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` - TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = CreateBackendRequest(tmp.tmpType) - - m.TimeoutServer = tmp.TmpTimeoutServer.Standard() - m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() - m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() - return nil -} - -func (m CreateBackendRequest) MarshalJSON() ([]byte, error) { - type tmpType CreateBackendRequest - tmp := struct { - tmpType - - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` - TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` - TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` - }{ - tmpType: tmpType(m), - - TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), - TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), - TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), - } - return json.Marshal(tmp) -} - -// CreateBackend: create a backend in a given load balancer. +// CreateBackend: Create a backend in a given load balancer. func (s *API) CreateBackend(req *CreateBackendRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -5307,9 +6683,8 @@ func (s *API) CreateBackend(req *CreateBackendRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/backends", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/backends", } err = scwReq.SetBody(req) @@ -5326,14 +6701,7 @@ func (s *API) CreateBackend(req *CreateBackendRequest, opts ...scw.RequestOption return &resp, nil } -type GetBackendRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` -} - -// GetBackend: get a backend in a given load balancer. +// GetBackend: Get a backend in a given load balancer. 
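// A sketch of the CreateBackend call shown above (assumptions: a configured *lb.API and
// an existing Load Balancer ID; the name, port, server address and the ProtocolHTTP
// constant follow the SDK's generated-enum naming convention and are illustrative).
// TimeoutServer is a plain *time.Duration; the request's custom MarshalJSON, visible in
// the removed lines above, wraps such fields for JSON encoding.
package lbexamples

import (
	"time"

	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
)

func createHTTPBackend(api *lb.API, lbID string) (*lb.Backend, error) {
	timeoutServer := 30 * time.Second
	return api.CreateBackend(&lb.CreateBackendRequest{
		LBID:            lbID,
		Name:            "web-backend",
		ForwardProtocol: lb.ProtocolHTTP,
		ForwardPort:     8080,
		ServerIP:        []string{"10.0.0.10"},
		TimeoutServer:   &timeoutServer,
		// A HealthCheck object can also be supplied; see UpdateHealthCheck below.
	})
}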
func (s *API) GetBackend(req *GetBackendRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -5351,9 +6719,8 @@ func (s *API) GetBackend(req *GetBackendRequest, opts ...scw.RequestOption) (*Ba } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "", } var resp Backend @@ -5365,100 +6732,7 @@ func (s *API) GetBackend(req *GetBackendRequest, opts ...scw.RequestOption) (*Ba return &resp, nil } -type UpdateBackendRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // Name: backend name. - Name string `json:"name"` - // ForwardProtocol: protocol to be used by the backend when forwarding traffic to backend servers. - // Default value: tcp - ForwardProtocol Protocol `json:"forward_protocol"` - // ForwardPort: port to be used by the backend when forwarding traffic to backend servers. - ForwardPort int32 `json:"forward_port"` - // ForwardPortAlgorithm: load balancing algorithm to be used when determining which backend server to forward new traffic to. - // Default value: roundrobin - ForwardPortAlgorithm ForwardPortAlgorithm `json:"forward_port_algorithm"` - // StickySessions: defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie to stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server. - // Default value: none - StickySessions StickySessionsType `json:"sticky_sessions"` - // StickySessionsCookieName: cookie name for cookie-based sticky sessions. - StickySessionsCookieName string `json:"sticky_sessions_cookie_name"` - // Deprecated: SendProxyV2: deprecated in favor of proxy_protocol field. - SendProxyV2 *bool `json:"send_proxy_v2,omitempty"` - // TimeoutServer: maximum allowed time for a backend server to process a request. - // Default value: 300000 - TimeoutServer *time.Duration `json:"timeout_server"` - // TimeoutConnect: maximum allowed time for establishing a connection to a backend server. - // Default value: 5000 - TimeoutConnect *time.Duration `json:"timeout_connect"` - // TimeoutTunnel: maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout). - // Default value: 900000 - TimeoutTunnel *time.Duration `json:"timeout_tunnel"` - // OnMarkedDownAction: action to take when a backend server is marked as down. - // Default value: on_marked_down_action_none - OnMarkedDownAction OnMarkedDownAction `json:"on_marked_down_action"` - // ProxyProtocol: protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. The PROXY protocol must be supported by the backend servers' software. - // Default value: proxy_protocol_unknown - ProxyProtocol ProxyProtocol `json:"proxy_protocol"` - // FailoverHost: scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. 
- FailoverHost *string `json:"failover_host"` - // SslBridging: defines whether to enable SSL bridging between the Load Balancer and backend servers. - SslBridging *bool `json:"ssl_bridging"` - // IgnoreSslServerVerify: defines whether the server certificate verification should be ignored. - IgnoreSslServerVerify *bool `json:"ignore_ssl_server_verify"` - // RedispatchAttemptCount: whether to use another backend server on each attempt. - RedispatchAttemptCount *int32 `json:"redispatch_attempt_count"` - // MaxRetries: number of retries when a backend server connection failed. - MaxRetries *int32 `json:"max_retries"` - // MaxConnections: maximum number of connections allowed per backend server. - MaxConnections *int32 `json:"max_connections"` - // TimeoutQueue: maximum time for a request to be left pending in queue when `max_connections` is reached. - TimeoutQueue *scw.Duration `json:"timeout_queue"` -} - -func (m *UpdateBackendRequest) UnmarshalJSON(b []byte) error { - type tmpType UpdateBackendRequest - tmp := struct { - tmpType - - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` - TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` - TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = UpdateBackendRequest(tmp.tmpType) - - m.TimeoutServer = tmp.TmpTimeoutServer.Standard() - m.TimeoutConnect = tmp.TmpTimeoutConnect.Standard() - m.TimeoutTunnel = tmp.TmpTimeoutTunnel.Standard() - return nil -} - -func (m UpdateBackendRequest) MarshalJSON() ([]byte, error) { - type tmpType UpdateBackendRequest - tmp := struct { - tmpType - - TmpTimeoutServer *marshaler.Duration `json:"timeout_server"` - TmpTimeoutConnect *marshaler.Duration `json:"timeout_connect"` - TmpTimeoutTunnel *marshaler.Duration `json:"timeout_tunnel"` - }{ - tmpType: tmpType(m), - - TmpTimeoutServer: marshaler.NewDuration(m.TimeoutServer), - TmpTimeoutConnect: marshaler.NewDuration(m.TimeoutConnect), - TmpTimeoutTunnel: marshaler.NewDuration(m.TimeoutTunnel), - } - return json.Marshal(tmp) -} - -// UpdateBackend: update a backend in a given load balancer. +// UpdateBackend: Update a backend in a given load balancer. func (s *API) UpdateBackend(req *UpdateBackendRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -5476,9 +6750,8 @@ func (s *API) UpdateBackend(req *UpdateBackendRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "", } err = scwReq.SetBody(req) @@ -5495,14 +6768,7 @@ func (s *API) UpdateBackend(req *UpdateBackendRequest, opts ...scw.RequestOption return &resp, nil } -type DeleteBackendRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // BackendID: ID of the backend to delete. - BackendID string `json:"-"` -} - -// DeleteBackend: delete a backend in a given load balancer. +// DeleteBackend: Delete a backend in a given load balancer. 
func (s *API) DeleteBackend(req *DeleteBackendRequest, opts ...scw.RequestOption) error { var err error @@ -5520,9 +6786,8 @@ func (s *API) DeleteBackend(req *DeleteBackendRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -5532,16 +6797,7 @@ func (s *API) DeleteBackend(req *DeleteBackendRequest, opts ...scw.RequestOption return nil } -type AddBackendServersRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // ServerIP: list of IP addresses to add to backend servers. - ServerIP []string `json:"server_ip"` -} - -// AddBackendServers: add a set of servers in a given backend. +// AddBackendServers: Add a set of servers in a given backend. func (s *API) AddBackendServers(req *AddBackendServersRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -5559,9 +6815,8 @@ func (s *API) AddBackendServers(req *AddBackendServersRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", } err = scwReq.SetBody(req) @@ -5578,16 +6833,7 @@ func (s *API) AddBackendServers(req *AddBackendServersRequest, opts ...scw.Reque return &resp, nil } -type RemoveBackendServersRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // ServerIP: list of IP addresses to remove from backend servers. - ServerIP []string `json:"server_ip"` -} - -// RemoveBackendServers: remove a set of servers for a given backend. +// RemoveBackendServers: Remove a set of servers for a given backend. func (s *API) RemoveBackendServers(req *RemoveBackendServersRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -5605,9 +6851,8 @@ func (s *API) RemoveBackendServers(req *RemoveBackendServersRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", } err = scwReq.SetBody(req) @@ -5624,16 +6869,7 @@ func (s *API) RemoveBackendServers(req *RemoveBackendServersRequest, opts ...scw return &resp, nil } -type SetBackendServersRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // ServerIP: list of IP addresses for backend servers. Any other existing backend servers will be removed. - ServerIP []string `json:"server_ip"` -} - -// SetBackendServers: define all servers in a given backend. +// SetBackendServers: Define all servers in a given backend. 
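// A sketch contrasting the three server-list calls in these hunks (assumptions: a
// configured *lb.API and an existing backend ID; the IP addresses are placeholders).
// AddBackendServers and RemoveBackendServers, shown above, edit the list incrementally,
// while SetBackendServers, defined just below, replaces it wholesale.
package lbexamples

import (
	lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1"
)

func replaceBackendServers(api *lb.API, backendID string) error {
	_, err := api.SetBackendServers(&lb.SetBackendServersRequest{
		BackendID: backendID,
		// Any server not listed here is removed from the backend.
		ServerIP: []string{"10.0.0.10", "10.0.0.11"},
	})
	return err
}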
func (s *API) SetBackendServers(req *SetBackendServersRequest, opts ...scw.RequestOption) (*Backend, error) { var err error @@ -5651,9 +6887,8 @@ func (s *API) SetBackendServers(req *SetBackendServersRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "/servers", } err = scwReq.SetBody(req) @@ -5670,84 +6905,7 @@ func (s *API) SetBackendServers(req *SetBackendServersRequest, opts ...scw.Reque return &resp, nil } -type UpdateHealthCheckRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // BackendID: backend ID. - BackendID string `json:"-"` - // Port: port to use for the backend server health check. - Port int32 `json:"port"` - // CheckDelay: time to wait between two consecutive health checks. - CheckDelay *time.Duration `json:"check_delay"` - // CheckTimeout: maximum time a backend server has to reply to the health check. - CheckTimeout *time.Duration `json:"check_timeout"` - // CheckMaxRetries: number of consecutive unsuccessful health checks after which the server will be considered dead. - CheckMaxRetries int32 `json:"check_max_retries"` - // CheckSendProxy: defines whether proxy protocol should be activated for the health check. - CheckSendProxy bool `json:"check_send_proxy"` - // TCPConfig: object to configure a basic TCP health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - TCPConfig *HealthCheckTCPConfig `json:"tcp_config,omitempty"` - // MysqlConfig: object to configure a MySQL health check. The check requires MySQL >=3.22, for older versions, use a TCP health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - MysqlConfig *HealthCheckMysqlConfig `json:"mysql_config,omitempty"` - // PgsqlConfig: object to configure a PostgreSQL health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - PgsqlConfig *HealthCheckPgsqlConfig `json:"pgsql_config,omitempty"` - // LdapConfig: object to configure an LDAP health check. The response is analyzed to find the LDAPv3 response message. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - LdapConfig *HealthCheckLdapConfig `json:"ldap_config,omitempty"` - // RedisConfig: object to configure a Redis health check. The response is analyzed to find the +PONG response message. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - RedisConfig *HealthCheckRedisConfig `json:"redis_config,omitempty"` - // HTTPConfig: object to configure an HTTP health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. - HTTPConfig *HealthCheckHTTPConfig `json:"http_config,omitempty"` - // HTTPSConfig: object to configure an HTTPS health check. - // Precisely one of HTTPConfig, HTTPSConfig, LdapConfig, MysqlConfig, PgsqlConfig, RedisConfig, TCPConfig must be set. 
- HTTPSConfig *HealthCheckHTTPSConfig `json:"https_config,omitempty"` - // TransientCheckDelay: time to wait between two consecutive health checks when a backend server is in a transient state (going UP or DOWN). - // Default value: 0.5s - TransientCheckDelay *scw.Duration `json:"transient_check_delay"` -} - -func (m *UpdateHealthCheckRequest) UnmarshalJSON(b []byte) error { - type tmpType UpdateHealthCheckRequest - tmp := struct { - tmpType - - TmpCheckDelay *marshaler.Duration `json:"check_delay"` - TmpCheckTimeout *marshaler.Duration `json:"check_timeout"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = UpdateHealthCheckRequest(tmp.tmpType) - - m.CheckDelay = tmp.TmpCheckDelay.Standard() - m.CheckTimeout = tmp.TmpCheckTimeout.Standard() - return nil -} - -func (m UpdateHealthCheckRequest) MarshalJSON() ([]byte, error) { - type tmpType UpdateHealthCheckRequest - tmp := struct { - tmpType - - TmpCheckDelay *marshaler.Duration `json:"check_delay"` - TmpCheckTimeout *marshaler.Duration `json:"check_timeout"` - }{ - tmpType: tmpType(m), - - TmpCheckDelay: marshaler.NewDuration(m.CheckDelay), - TmpCheckTimeout: marshaler.NewDuration(m.CheckTimeout), - } - return json.Marshal(tmp) -} - -// UpdateHealthCheck: update an health check for a given backend. +// UpdateHealthCheck: Update an health check for a given backend. func (s *API) UpdateHealthCheck(req *UpdateHealthCheckRequest, opts ...scw.RequestOption) (*HealthCheck, error) { var err error @@ -5765,9 +6923,8 @@ func (s *API) UpdateHealthCheck(req *UpdateHealthCheckRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "/healthcheck", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/backends/" + fmt.Sprint(req.BackendID) + "/healthcheck", } err = scwReq.SetBody(req) @@ -5784,23 +6941,7 @@ func (s *API) UpdateHealthCheck(req *UpdateHealthCheckRequest, opts ...scw.Reque return &resp, nil } -type ListFrontendsRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: name of the frontend to filter for. - Name *string `json:"-"` - // OrderBy: sort order of frontends in the response. - // Default value: created_at_asc - OrderBy ListFrontendsRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of frontends to return. - PageSize *uint32 `json:"-"` -} - -// ListFrontends: list frontends in a given load balancer. +// ListFrontends: List frontends in a given load balancer. 
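UpdateHealthCheck accepts exactly one of the protocol-specific *Config fields, as the "Precisely one of ... must be set" comments state. A hedged sketch configuring a basic TCP check; the empty HealthCheckTCPConfig literal assumes that type carries no required fields, and api/backendID are placeholders:

// Sketch: switch a backend's health check to a plain TCP probe.
func useTCPHealthCheck(api *lb.API, backendID string) error {
	checkDelay := 5 * time.Second
	checkTimeout := 2 * time.Second
	_, err := api.UpdateHealthCheck(&lb.UpdateHealthCheckRequest{
		Region:          scw.RegionFrPar,
		BackendID:       backendID,
		Port:            80,
		CheckMaxRetries: 3,
		CheckDelay:      &checkDelay,
		CheckTimeout:    &checkTimeout,
		TCPConfig:       &lb.HealthCheckTCPConfig{}, // exactly one *Config may be set
	})
	return err
}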
func (s *API) ListFrontends(req *ListFrontendsRequest, opts ...scw.RequestOption) (*ListFrontendsResponse, error) { var err error @@ -5829,10 +6970,9 @@ func (s *API) ListFrontends(req *ListFrontendsRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/frontends", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/frontends", + Query: query, } var resp ListFrontendsResponse @@ -5844,61 +6984,7 @@ func (s *API) ListFrontends(req *ListFrontendsRequest, opts ...scw.RequestOption return &resp, nil } -type CreateFrontendRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID (ID of the Load Balancer to attach the frontend to). - LBID string `json:"-"` - // Name: name for the frontend. - Name string `json:"name"` - // InboundPort: port the frontend should listen on. - InboundPort int32 `json:"inbound_port"` - // BackendID: backend ID (ID of the backend the frontend should pass traffic to). - BackendID string `json:"backend_id"` - // TimeoutClient: maximum allowed inactivity time on the client side. - // Default value: 300000 - TimeoutClient *time.Duration `json:"timeout_client"` - // Deprecated: CertificateID: certificate ID, deprecated in favor of certificate_ids array. - CertificateID *string `json:"certificate_id,omitempty"` - // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. - CertificateIDs *[]string `json:"certificate_ids"` - // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. - EnableHTTP3 bool `json:"enable_http3"` -} - -func (m *CreateFrontendRequest) UnmarshalJSON(b []byte) error { - type tmpType CreateFrontendRequest - tmp := struct { - tmpType - - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = CreateFrontendRequest(tmp.tmpType) - - m.TimeoutClient = tmp.TmpTimeoutClient.Standard() - return nil -} - -func (m CreateFrontendRequest) MarshalJSON() ([]byte, error) { - type tmpType CreateFrontendRequest - tmp := struct { - tmpType - - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` - }{ - tmpType: tmpType(m), - - TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), - } - return json.Marshal(tmp) -} - -// CreateFrontend: create a frontend in a given load balancer. +// CreateFrontend: Create a frontend in a given load balancer. func (s *API) CreateFrontend(req *CreateFrontendRequest, opts ...scw.RequestOption) (*Frontend, error) { var err error @@ -5920,9 +7006,8 @@ func (s *API) CreateFrontend(req *CreateFrontendRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/frontends", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/frontends", } err = scwReq.SetBody(req) @@ -5939,14 +7024,7 @@ func (s *API) CreateFrontend(req *CreateFrontendRequest, opts ...scw.RequestOpti return &resp, nil } -type GetFrontendRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // FrontendID: frontend ID. 
- FrontendID string `json:"-"` -} - -// GetFrontend: get a frontend. +// GetFrontend: Get a frontend. func (s *API) GetFrontend(req *GetFrontendRequest, opts ...scw.RequestOption) (*Frontend, error) { var err error @@ -5964,9 +7042,8 @@ func (s *API) GetFrontend(req *GetFrontendRequest, opts ...scw.RequestOption) (* } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", } var resp Frontend @@ -5978,61 +7055,7 @@ func (s *API) GetFrontend(req *GetFrontendRequest, opts ...scw.RequestOption) (* return &resp, nil } -type UpdateFrontendRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // FrontendID: frontend ID. - FrontendID string `json:"-"` - // Name: frontend name. - Name string `json:"name"` - // InboundPort: port the frontend should listen on. - InboundPort int32 `json:"inbound_port"` - // BackendID: backend ID (ID of the backend the frontend should pass traffic to). - BackendID string `json:"backend_id"` - // TimeoutClient: maximum allowed inactivity time on the client side. - // Default value: 300000 - TimeoutClient *time.Duration `json:"timeout_client"` - // Deprecated: CertificateID: certificate ID, deprecated in favor of certificate_ids array. - CertificateID *string `json:"certificate_id,omitempty"` - // CertificateIDs: list of SSL/TLS certificate IDs to bind to the frontend. - CertificateIDs *[]string `json:"certificate_ids"` - // EnableHTTP3: defines whether to enable HTTP/3 protocol on the frontend. - EnableHTTP3 bool `json:"enable_http3"` -} - -func (m *UpdateFrontendRequest) UnmarshalJSON(b []byte) error { - type tmpType UpdateFrontendRequest - tmp := struct { - tmpType - - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` - }{} - err := json.Unmarshal(b, &tmp) - if err != nil { - return err - } - - *m = UpdateFrontendRequest(tmp.tmpType) - - m.TimeoutClient = tmp.TmpTimeoutClient.Standard() - return nil -} - -func (m UpdateFrontendRequest) MarshalJSON() ([]byte, error) { - type tmpType UpdateFrontendRequest - tmp := struct { - tmpType - - TmpTimeoutClient *marshaler.Duration `json:"timeout_client"` - }{ - tmpType: tmpType(m), - - TmpTimeoutClient: marshaler.NewDuration(m.TimeoutClient), - } - return json.Marshal(tmp) -} - -// UpdateFrontend: update a frontend. +// UpdateFrontend: Update a frontend. func (s *API) UpdateFrontend(req *UpdateFrontendRequest, opts ...scw.RequestOption) (*Frontend, error) { var err error @@ -6050,9 +7073,8 @@ func (s *API) UpdateFrontend(req *UpdateFrontendRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", } err = scwReq.SetBody(req) @@ -6069,14 +7091,7 @@ func (s *API) UpdateFrontend(req *UpdateFrontendRequest, opts ...scw.RequestOpti return &resp, nil } -type DeleteFrontendRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // FrontendID: ID of the frontend to delete. - FrontendID string `json:"-"` -} - -// DeleteFrontend: delete a frontend. 
+// DeleteFrontend: Delete a frontend. func (s *API) DeleteFrontend(req *DeleteFrontendRequest, opts ...scw.RequestOption) error { var err error @@ -6094,9 +7109,8 @@ func (s *API) DeleteFrontend(req *DeleteFrontendRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -6106,21 +7120,7 @@ func (s *API) DeleteFrontend(req *DeleteFrontendRequest, opts ...scw.RequestOpti return nil } -type ListRoutesRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // OrderBy: sort order of routes in the response. - // Default value: created_at_asc - OrderBy ListRoutesRequestOrderBy `json:"-"` - // PageSize: the number of route objects to return. - PageSize *uint32 `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // FrontendID: frontend ID to filter for, only Routes from this Frontend will be returned. - FrontendID *string `json:"-"` -} - -// ListRoutes: list all backend redirections. +// ListRoutes: List all backend redirections. func (s *API) ListRoutes(req *ListRoutesRequest, opts ...scw.RequestOption) (*ListRoutesResponse, error) { var err error @@ -6145,10 +7145,9 @@ func (s *API) ListRoutes(req *ListRoutesRequest, opts ...scw.RequestOption) (*Li } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes", + Query: query, } var resp ListRoutesResponse @@ -6160,18 +7159,7 @@ func (s *API) ListRoutes(req *ListRoutesRequest, opts ...scw.RequestOption) (*Li return &resp, nil } -type CreateRouteRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // FrontendID: ID of the source frontend to create the route on. - FrontendID string `json:"frontend_id"` - // BackendID: ID of the target backend for the route. - BackendID string `json:"backend_id"` - // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. - Match *RouteMatch `json:"match"` -} - -// CreateRoute: create a backend redirection. +// CreateRoute: Create a backend redirection. func (s *API) CreateRoute(req *CreateRouteRequest, opts ...scw.RequestOption) (*Route, error) { var err error @@ -6185,9 +7173,8 @@ func (s *API) CreateRoute(req *CreateRouteRequest, opts ...scw.RequestOption) (* } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes", } err = scwReq.SetBody(req) @@ -6204,14 +7191,7 @@ func (s *API) CreateRoute(req *CreateRouteRequest, opts ...scw.RequestOption) (* return &resp, nil } -type GetRouteRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // RouteID: route ID. 
- RouteID string `json:"-"` -} - -// GetRoute: get single backend redirection. +// GetRoute: Get single backend redirection. func (s *API) GetRoute(req *GetRouteRequest, opts ...scw.RequestOption) (*Route, error) { var err error @@ -6229,9 +7209,8 @@ func (s *API) GetRoute(req *GetRouteRequest, opts ...scw.RequestOption) (*Route, } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes/" + fmt.Sprint(req.RouteID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes/" + fmt.Sprint(req.RouteID) + "", } var resp Route @@ -6243,18 +7222,7 @@ func (s *API) GetRoute(req *GetRouteRequest, opts ...scw.RequestOption) (*Route, return &resp, nil } -type UpdateRouteRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // RouteID: route ID. - RouteID string `json:"-"` - // BackendID: ID of the target backend for the route. - BackendID string `json:"backend_id"` - // Match: object defining the match condition for a route to be applied. If an incoming client session matches the specified condition (i.e. it has a matching SNI value or HTTP Host header value), it will be passed to the target backend. - Match *RouteMatch `json:"match"` -} - -// UpdateRoute: edit a backend redirection. +// UpdateRoute: Edit a backend redirection. func (s *API) UpdateRoute(req *UpdateRouteRequest, opts ...scw.RequestOption) (*Route, error) { var err error @@ -6272,9 +7240,8 @@ func (s *API) UpdateRoute(req *UpdateRouteRequest, opts ...scw.RequestOption) (* } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes/" + fmt.Sprint(req.RouteID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes/" + fmt.Sprint(req.RouteID) + "", } err = scwReq.SetBody(req) @@ -6291,14 +7258,7 @@ func (s *API) UpdateRoute(req *UpdateRouteRequest, opts ...scw.RequestOption) (* return &resp, nil } -type DeleteRouteRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // RouteID: route ID. - RouteID string `json:"-"` -} - -// DeleteRoute: delete a backend redirection. +// DeleteRoute: Delete a backend redirection. func (s *API) DeleteRoute(req *DeleteRouteRequest, opts ...scw.RequestOption) error { var err error @@ -6316,9 +7276,8 @@ func (s *API) DeleteRoute(req *DeleteRouteRequest, opts ...scw.RequestOption) er } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes/" + fmt.Sprint(req.RouteID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/routes/" + fmt.Sprint(req.RouteID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -6328,16 +7287,7 @@ func (s *API) DeleteRoute(req *DeleteRouteRequest, opts ...scw.RequestOption) er return nil } -type GetLBStatsRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // BackendID: ID of the backend. - BackendID *string `json:"-"` -} - -// Deprecated: GetLBStats: get usage statistics of a given load balancer. +// Deprecated: GetLBStats: Get usage statistics of a given load balancer. 
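The route endpoints above pair naturally with ListRoutes filtered by frontend. A sketch using the same imports as the earlier examples; Region is left empty on purpose so the client's default region is used, mirroring the `req.Region == ""` fallback in these methods, and scw.WithAllPages() drives the paginated-response helpers defined at the end of this file:

// Sketch: collect every route pointing at one frontend across all pages.
func routesForFrontend(api *lb.API, frontendID string) ([]*lb.Route, error) {
	resp, err := api.ListRoutes(&lb.ListRoutesRequest{
		FrontendID: &frontendID,
	}, scw.WithAllPages())
	if err != nil {
		return nil, err
	}
	return resp.Routes, nil
}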
func (s *API) GetLBStats(req *GetLBStatsRequest, opts ...scw.RequestOption) (*LBStats, error) { var err error @@ -6358,10 +7308,9 @@ func (s *API) GetLBStats(req *GetLBStatsRequest, opts ...scw.RequestOption) (*LB } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/stats", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/stats", + Query: query, } var resp LBStats @@ -6373,20 +7322,7 @@ func (s *API) GetLBStats(req *GetLBStatsRequest, opts ...scw.RequestOption) (*LB return &resp, nil } -type ListBackendStatsRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of items to return. - PageSize *uint32 `json:"-"` - // BackendID: ID of the backend. - BackendID *string `json:"-"` -} - -// ListBackendStats: list backend server statistics. +// ListBackendStats: List backend server statistics. func (s *API) ListBackendStats(req *ListBackendStatsRequest, opts ...scw.RequestOption) (*ListBackendStatsResponse, error) { var err error @@ -6414,10 +7350,9 @@ func (s *API) ListBackendStats(req *ListBackendStatsRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/backend-stats", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/backend-stats", + Query: query, } var resp ListBackendStatsResponse @@ -6429,23 +7364,7 @@ func (s *API) ListBackendStats(req *ListBackendStatsRequest, opts ...scw.Request return &resp, nil } -type ListACLsRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // FrontendID: frontend ID (ACLs attached to this frontend will be returned in the response). - FrontendID string `json:"-"` - // OrderBy: sort order of ACLs in the response. - // Default value: created_at_asc - OrderBy ListACLRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: the number of ACLs to return. - PageSize *uint32 `json:"-"` - // Name: ACL name to filter for. - Name *string `json:"-"` -} - -// ListACLs: list ACL for a given frontend. +// ListACLs: List ACL for a given frontend. func (s *API) ListACLs(req *ListACLsRequest, opts ...scw.RequestOption) (*ListACLResponse, error) { var err error @@ -6474,10 +7393,9 @@ func (s *API) ListACLs(req *ListACLsRequest, opts ...scw.RequestOption) (*ListAC } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", + Query: query, } var resp ListACLResponse @@ -6489,24 +7407,7 @@ func (s *API) ListACLs(req *ListACLsRequest, opts ...scw.RequestOption) (*ListAC return &resp, nil } -type CreateACLRequest struct { - // Region: region to target. If none is passed will use default region from the config. 
- Region scw.Region `json:"-"` - // FrontendID: frontend ID to attach the ACL to. - FrontendID string `json:"-"` - // Name: ACL name. - Name string `json:"name"` - // Action: action to take when incoming traffic matches an ACL filter. - Action *ACLAction `json:"action"` - // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. - Match *ACLMatch `json:"match"` - // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). - Index int32 `json:"index"` - // Description: ACL description. - Description string `json:"description"` -} - -// CreateACL: create an ACL for a given frontend. +// CreateACL: Create an ACL for a given frontend. func (s *API) CreateACL(req *CreateACLRequest, opts ...scw.RequestOption) (*ACL, error) { var err error @@ -6528,9 +7429,8 @@ func (s *API) CreateACL(req *CreateACLRequest, opts ...scw.RequestOption) (*ACL, } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/frontends/" + fmt.Sprint(req.FrontendID) + "/acls", } err = scwReq.SetBody(req) @@ -6547,14 +7447,7 @@ func (s *API) CreateACL(req *CreateACLRequest, opts ...scw.RequestOption) (*ACL, return &resp, nil } -type GetACLRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // ACLID: ACL ID. - ACLID string `json:"-"` -} - -// GetACL: get an ACL. +// GetACL: Get an ACL. func (s *API) GetACL(req *GetACLRequest, opts ...scw.RequestOption) (*ACL, error) { var err error @@ -6572,9 +7465,8 @@ func (s *API) GetACL(req *GetACLRequest, opts ...scw.RequestOption) (*ACL, error } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/acls/" + fmt.Sprint(req.ACLID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/acls/" + fmt.Sprint(req.ACLID) + "", } var resp ACL @@ -6586,24 +7478,7 @@ func (s *API) GetACL(req *GetACLRequest, opts ...scw.RequestOption) (*ACL, error return &resp, nil } -type UpdateACLRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // ACLID: ACL ID. - ACLID string `json:"-"` - // Name: ACL name. - Name string `json:"name"` - // Action: action to take when incoming traffic matches an ACL filter. - Action *ACLAction `json:"action"` - // Match: ACL match filter object. One of `ip_subnet` or `http_filter` & `http_filter_value` are required. - Match *ACLMatch `json:"match"` - // Index: priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed). - Index int32 `json:"index"` - // Description: ACL description. - Description *string `json:"description"` -} - -// UpdateACL: update an ACL. +// UpdateACL: Update an ACL. 
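ACLs attached to a frontend are applied in ascending Index order (0 runs first), so reading them back with ListACLs is the easiest way to audit that ordering. A sketch with a placeholder name filter:

// Sketch: read back the ACLs on a frontend; "allow-office" is a placeholder filter value.
func frontendACLs(api *lb.API, frontendID string) (*lb.ListACLResponse, error) {
	name := "allow-office"
	return api.ListACLs(&lb.ListACLsRequest{
		Region:     scw.RegionFrPar,
		FrontendID: frontendID,
		Name:       &name,
	})
}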
func (s *API) UpdateACL(req *UpdateACLRequest, opts ...scw.RequestOption) (*ACL, error) { var err error @@ -6621,9 +7496,8 @@ func (s *API) UpdateACL(req *UpdateACLRequest, opts ...scw.RequestOption) (*ACL, } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/acls/" + fmt.Sprint(req.ACLID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/acls/" + fmt.Sprint(req.ACLID) + "", } err = scwReq.SetBody(req) @@ -6640,14 +7514,7 @@ func (s *API) UpdateACL(req *UpdateACLRequest, opts ...scw.RequestOption) (*ACL, return &resp, nil } -type DeleteACLRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // ACLID: ACL ID. - ACLID string `json:"-"` -} - -// DeleteACL: delete an ACL. +// DeleteACL: Delete an ACL. func (s *API) DeleteACL(req *DeleteACLRequest, opts ...scw.RequestOption) error { var err error @@ -6665,9 +7532,8 @@ func (s *API) DeleteACL(req *DeleteACLRequest, opts ...scw.RequestOption) error } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/acls/" + fmt.Sprint(req.ACLID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/acls/" + fmt.Sprint(req.ACLID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -6677,23 +7543,7 @@ func (s *API) DeleteACL(req *DeleteACLRequest, opts ...scw.RequestOption) error return nil } -type CreateCertificateRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // Name: name for the certificate. - Name string `json:"name"` - // Letsencrypt: object to define a new Let's Encrypt certificate to be generated. - // Precisely one of CustomCertificate, Letsencrypt must be set. - Letsencrypt *CreateCertificateRequestLetsencryptConfig `json:"letsencrypt,omitempty"` - // CustomCertificate: object to define an existing custom certificate to be imported. - // Precisely one of CustomCertificate, Letsencrypt must be set. - CustomCertificate *CreateCertificateRequestCustomCertificate `json:"custom_certificate,omitempty"` -} - -// CreateCertificate: create a TLS certificate. -// Generate a new TLS certificate using Let's Encrypt or import your certificate. +// CreateCertificate: Generate a new TLS certificate using Let's Encrypt or import your certificate. func (s *API) CreateCertificate(req *CreateCertificateRequest, opts ...scw.RequestOption) (*Certificate, error) { var err error @@ -6715,9 +7565,8 @@ func (s *API) CreateCertificate(req *CreateCertificateRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/certificates", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/certificates", } err = scwReq.SetBody(req) @@ -6734,23 +7583,7 @@ func (s *API) CreateCertificate(req *CreateCertificateRequest, opts ...scw.Reque return &resp, nil } -type ListCertificatesRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // OrderBy: sort order of certificates in the response. 
- // Default value: created_at_asc - OrderBy ListCertificatesRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: number of certificates to return. - PageSize *uint32 `json:"-"` - // Name: certificate name to filter for, only certificates of this name will be returned. - Name *string `json:"-"` -} - -// ListCertificates: list all TLS certificates on a given load balancer. +// ListCertificates: List all TLS certificates on a given load balancer. func (s *API) ListCertificates(req *ListCertificatesRequest, opts ...scw.RequestOption) (*ListCertificatesResponse, error) { var err error @@ -6779,10 +7612,9 @@ func (s *API) ListCertificates(req *ListCertificatesRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/certificates", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/certificates", + Query: query, } var resp ListCertificatesResponse @@ -6794,14 +7626,7 @@ func (s *API) ListCertificates(req *ListCertificatesRequest, opts ...scw.Request return &resp, nil } -type GetCertificateRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // CertificateID: certificate ID. - CertificateID string `json:"-"` -} - -// GetCertificate: get a TLS certificate. +// GetCertificate: Get a TLS certificate. func (s *API) GetCertificate(req *GetCertificateRequest, opts ...scw.RequestOption) (*Certificate, error) { var err error @@ -6819,9 +7644,8 @@ func (s *API) GetCertificate(req *GetCertificateRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", } var resp Certificate @@ -6833,16 +7657,7 @@ func (s *API) GetCertificate(req *GetCertificateRequest, opts ...scw.RequestOpti return &resp, nil } -type UpdateCertificateRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // CertificateID: certificate ID. - CertificateID string `json:"-"` - // Name: certificate name. - Name string `json:"name"` -} - -// UpdateCertificate: update a TLS certificate. +// UpdateCertificate: Update a TLS certificate. func (s *API) UpdateCertificate(req *UpdateCertificateRequest, opts ...scw.RequestOption) (*Certificate, error) { var err error @@ -6860,9 +7675,8 @@ func (s *API) UpdateCertificate(req *UpdateCertificateRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", } err = scwReq.SetBody(req) @@ -6879,14 +7693,7 @@ func (s *API) UpdateCertificate(req *UpdateCertificateRequest, opts ...scw.Reque return &resp, nil } -type DeleteCertificateRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // CertificateID: certificate ID. 
- CertificateID string `json:"-"` -} - -// DeleteCertificate: delete a TLS certificate. +// DeleteCertificate: Delete a TLS certificate. func (s *API) DeleteCertificate(req *DeleteCertificateRequest, opts ...scw.RequestOption) error { var err error @@ -6904,9 +7711,8 @@ func (s *API) DeleteCertificate(req *DeleteCertificateRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/certificates/" + fmt.Sprint(req.CertificateID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -6916,16 +7722,7 @@ func (s *API) DeleteCertificate(req *DeleteCertificateRequest, opts ...scw.Reque return nil } -type ListLBTypesRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: the number of items to return. - PageSize *uint32 `json:"-"` -} - -// ListLBTypes: list all load balancer offer type. +// ListLBTypes: List all load balancer offer type. func (s *API) ListLBTypes(req *ListLBTypesRequest, opts ...scw.RequestOption) (*ListLBTypesResponse, error) { var err error @@ -6948,10 +7745,9 @@ func (s *API) ListLBTypes(req *ListLBTypesRequest, opts ...scw.RequestOption) (* } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lb-types", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lb-types", + Query: query, } var resp ListLBTypesResponse @@ -6963,52 +7759,32 @@ func (s *API) ListLBTypes(req *ListLBTypesRequest, opts ...scw.RequestOption) (* return &resp, nil } -type CreateSubscriberRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // Name: subscriber name. - Name string `json:"name"` - // EmailConfig: email address configuration. - // Precisely one of EmailConfig, WebhookConfig must be set. - EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` - // WebhookConfig: webHook URI configuration. - // Precisely one of EmailConfig, WebhookConfig must be set. - WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` - // Deprecated: OrganizationID: organization ID to create the subscriber in. - // Precisely one of OrganizationID, ProjectID must be set. - OrganizationID *string `json:"organization_id,omitempty"` - // ProjectID: project ID to create the subscriber in. - // Precisely one of OrganizationID, ProjectID must be set. - ProjectID *string `json:"project_id,omitempty"` -} - -// CreateSubscriber: create a subscriber, webhook or email. +// CreateSubscriber: Create a subscriber, webhook or email. 
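UpdateCertificate only exposes Name besides the identifiers, so it is effectively a rename. A sketch with placeholder arguments:

// Sketch: rename an existing TLS certificate on a Load Balancer.
func renameCertificate(api *lb.API, certificateID, newName string) (*lb.Certificate, error) {
	return api.UpdateCertificate(&lb.UpdateCertificateRequest{
		Region:        scw.RegionFrPar,
		CertificateID: certificateID,
		Name:          newName,
	})
}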
func (s *API) CreateSubscriber(req *CreateSubscriberRequest, opts ...scw.RequestOption) (*Subscriber, error) { var err error - defaultProjectID, exist := s.client.GetDefaultProjectID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.ProjectID = &defaultProjectID - } - - defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() - if exist && req.OrganizationID == nil && req.ProjectID == nil { - req.OrganizationID = &defaultOrganizationID - } - if req.Region == "" { defaultRegion, _ := s.client.GetDefaultRegion() req.Region = defaultRegion } + defaultProjectID, exist := s.client.GetDefaultProjectID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.ProjectID = &defaultProjectID + } + + defaultOrganizationID, exist := s.client.GetDefaultOrganizationID() + if exist && req.ProjectID == nil && req.OrganizationID == nil { + req.OrganizationID = &defaultOrganizationID + } + if fmt.Sprint(req.Region) == "" { return nil, errors.New("field Region cannot be empty in request") } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/subscribers", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/subscribers", } err = scwReq.SetBody(req) @@ -7025,14 +7801,7 @@ func (s *API) CreateSubscriber(req *CreateSubscriberRequest, opts ...scw.Request return &resp, nil } -type GetSubscriberRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // SubscriberID: subscriber ID. - SubscriberID string `json:"-"` -} - -// GetSubscriber: get a subscriber. +// GetSubscriber: Get a subscriber. func (s *API) GetSubscriber(req *GetSubscriberRequest, opts ...scw.RequestOption) (*Subscriber, error) { var err error @@ -7050,9 +7819,8 @@ func (s *API) GetSubscriber(req *GetSubscriberRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/subscribers/" + fmt.Sprint(req.SubscriberID) + "", - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/subscribers/" + fmt.Sprint(req.SubscriberID) + "", } var resp Subscriber @@ -7064,25 +7832,7 @@ func (s *API) GetSubscriber(req *GetSubscriberRequest, opts ...scw.RequestOption return &resp, nil } -type ListSubscriberRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // OrderBy: sort order of subscribers in the response. - // Default value: created_at_asc - OrderBy ListSubscriberRequestOrderBy `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` - // PageSize: the number of items to return. - PageSize *uint32 `json:"-"` - // Name: subscriber name to search for. - Name *string `json:"-"` - // OrganizationID: filter subscribers by Organization ID. - OrganizationID *string `json:"-"` - // ProjectID: filter subscribers by Project ID. - ProjectID *string `json:"-"` -} - -// ListSubscriber: list all subscriber. +// ListSubscriber: List all subscriber. 
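Note the reordered defaulting logic above: CreateSubscriber now resolves the region first and, when neither ProjectID nor OrganizationID is set, falls back to the client's default project (then organization). A related sketch that lists the subscribers of one project; the project ID is a placeholder:

// Sketch: list subscribers scoped to a single (placeholder) project.
func subscribersInProject(api *lb.API, projectID string) (*lb.ListSubscriberResponse, error) {
	return api.ListSubscriber(&lb.ListSubscriberRequest{
		Region:    scw.RegionFrPar,
		ProjectID: &projectID,
	})
}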
func (s *API) ListSubscriber(req *ListSubscriberRequest, opts ...scw.RequestOption) (*ListSubscriberResponse, error) { var err error @@ -7109,10 +7859,9 @@ func (s *API) ListSubscriber(req *ListSubscriberRequest, opts ...scw.RequestOpti } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/subscribers", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/subscribers", + Query: query, } var resp ListSubscriberResponse @@ -7124,22 +7873,7 @@ func (s *API) ListSubscriber(req *ListSubscriberRequest, opts ...scw.RequestOpti return &resp, nil } -type UpdateSubscriberRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // SubscriberID: subscriber ID. - SubscriberID string `json:"-"` - // Name: subscriber name. - Name string `json:"name"` - // EmailConfig: email address configuration. - // Precisely one of EmailConfig, WebhookConfig must be set. - EmailConfig *SubscriberEmailConfig `json:"email_config,omitempty"` - // WebhookConfig: webhook URI configuration. - // Precisely one of EmailConfig, WebhookConfig must be set. - WebhookConfig *SubscriberWebhookConfig `json:"webhook_config,omitempty"` -} - -// UpdateSubscriber: update a subscriber. +// UpdateSubscriber: Update a subscriber. func (s *API) UpdateSubscriber(req *UpdateSubscriberRequest, opts ...scw.RequestOption) (*Subscriber, error) { var err error @@ -7157,9 +7891,8 @@ func (s *API) UpdateSubscriber(req *UpdateSubscriberRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "PUT", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/subscribers/" + fmt.Sprint(req.SubscriberID) + "", - Headers: http.Header{}, + Method: "PUT", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/subscribers/" + fmt.Sprint(req.SubscriberID) + "", } err = scwReq.SetBody(req) @@ -7176,14 +7909,7 @@ func (s *API) UpdateSubscriber(req *UpdateSubscriberRequest, opts ...scw.Request return &resp, nil } -type DeleteSubscriberRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // SubscriberID: subscriber ID. - SubscriberID string `json:"-"` -} - -// DeleteSubscriber: delete a subscriber. +// DeleteSubscriber: Delete a subscriber. func (s *API) DeleteSubscriber(req *DeleteSubscriberRequest, opts ...scw.RequestOption) error { var err error @@ -7201,9 +7927,8 @@ func (s *API) DeleteSubscriber(req *DeleteSubscriberRequest, opts ...scw.Request } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lb/subscriber/" + fmt.Sprint(req.SubscriberID) + "", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lb/subscriber/" + fmt.Sprint(req.SubscriberID) + "", } err = s.client.Do(scwReq, nil, opts...) @@ -7213,16 +7938,7 @@ func (s *API) DeleteSubscriber(req *DeleteSubscriberRequest, opts ...scw.Request return nil } -type SubscribeToLBRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // SubscriberID: subscriber ID. - SubscriberID string `json:"subscriber_id"` -} - -// SubscribeToLB: subscribe a subscriber to a given load balancer. +// SubscribeToLB: Subscribe a subscriber to a given load balancer. 
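SubscribeToLB and UnsubscribeFromLB both return the updated LB object. A sketch that attaches a subscriber to a Load Balancer and then detaches it again, with placeholder IDs:

// Sketch: toggle a Load Balancer's alert subscription.
func toggleSubscription(api *lb.API, lbID, subscriberID string) error {
	if _, err := api.SubscribeToLB(&lb.SubscribeToLBRequest{
		Region:       scw.RegionFrPar,
		LBID:         lbID,
		SubscriberID: subscriberID,
	}); err != nil {
		return err
	}
	_, err := api.UnsubscribeFromLB(&lb.UnsubscribeFromLBRequest{
		Region: scw.RegionFrPar,
		LBID:   lbID,
	})
	return err
}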
func (s *API) SubscribeToLB(req *SubscribeToLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -7240,9 +7956,8 @@ func (s *API) SubscribeToLB(req *SubscribeToLBRequest, opts ...scw.RequestOption } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lb/" + fmt.Sprint(req.LBID) + "/subscribe", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lb/" + fmt.Sprint(req.LBID) + "/subscribe", } err = scwReq.SetBody(req) @@ -7259,14 +7974,7 @@ func (s *API) SubscribeToLB(req *SubscribeToLBRequest, opts ...scw.RequestOption return &resp, nil } -type UnsubscribeFromLBRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` -} - -// UnsubscribeFromLB: unsubscribe a subscriber from a given load balancer. +// UnsubscribeFromLB: Unsubscribe a subscriber from a given load balancer. func (s *API) UnsubscribeFromLB(req *UnsubscribeFromLBRequest, opts ...scw.RequestOption) (*LB, error) { var err error @@ -7284,9 +7992,8 @@ func (s *API) UnsubscribeFromLB(req *UnsubscribeFromLBRequest, opts ...scw.Reque } scwReq := &scw.ScalewayRequest{ - Method: "DELETE", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lb/" + fmt.Sprint(req.LBID) + "/unsubscribe", - Headers: http.Header{}, + Method: "DELETE", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lb/" + fmt.Sprint(req.LBID) + "/unsubscribe", } var resp LB @@ -7298,21 +8005,7 @@ func (s *API) UnsubscribeFromLB(req *UnsubscribeFromLBRequest, opts ...scw.Reque return &resp, nil } -type ListLBPrivateNetworksRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // OrderBy: sort order of Private Network objects in the response. - // Default value: created_at_asc - OrderBy ListPrivateNetworksRequestOrderBy `json:"-"` - // PageSize: number of objects to return. - PageSize *uint32 `json:"-"` - // Page: the page number to return, from the paginated results. - Page *int32 `json:"-"` -} - -// ListLBPrivateNetworks: list attached private network of load balancer. +// ListLBPrivateNetworks: List attached private network of load balancer. func (s *API) ListLBPrivateNetworks(req *ListLBPrivateNetworksRequest, opts ...scw.RequestOption) (*ListLBPrivateNetworksResponse, error) { var err error @@ -7340,10 +8033,9 @@ func (s *API) ListLBPrivateNetworks(req *ListLBPrivateNetworksRequest, opts ...s } scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks", - Query: query, - Headers: http.Header{}, + Method: "GET", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks", + Query: query, } var resp ListLBPrivateNetworksResponse @@ -7355,25 +8047,7 @@ func (s *API) ListLBPrivateNetworks(req *ListLBPrivateNetworksRequest, opts ...s return &resp, nil } -type AttachPrivateNetworkRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load Balancer ID. - LBID string `json:"-"` - // PrivateNetworkID: private Network ID. 
- PrivateNetworkID string `json:"-"` - // Deprecated: StaticConfig: object containing an array of a local IP address for the Load Balancer on this Private Network. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. - StaticConfig *PrivateNetworkStaticConfig `json:"static_config,omitempty"` - // DHCPConfig: defines whether to let DHCP assign IP addresses. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. - DHCPConfig *PrivateNetworkDHCPConfig `json:"dhcp_config,omitempty"` - // Deprecated: IpamConfig: for internal use only. - // Precisely one of DHCPConfig, IpamConfig, StaticConfig must be set. - IpamConfig *PrivateNetworkIpamConfig `json:"ipam_config,omitempty"` -} - -// AttachPrivateNetwork: add load balancer on instance private network. +// AttachPrivateNetwork: Add load balancer on instance private network. func (s *API) AttachPrivateNetwork(req *AttachPrivateNetworkRequest, opts ...scw.RequestOption) (*PrivateNetwork, error) { var err error @@ -7395,9 +8069,8 @@ func (s *API) AttachPrivateNetwork(req *AttachPrivateNetworkRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks/" + fmt.Sprint(req.PrivateNetworkID) + "/attach", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks/" + fmt.Sprint(req.PrivateNetworkID) + "/attach", } err = scwReq.SetBody(req) @@ -7414,16 +8087,7 @@ func (s *API) AttachPrivateNetwork(req *AttachPrivateNetworkRequest, opts ...scw return &resp, nil } -type DetachPrivateNetworkRequest struct { - // Region: region to target. If none is passed will use default region from the config. - Region scw.Region `json:"-"` - // LBID: load balancer ID. - LBID string `json:"-"` - // PrivateNetworkID: set your instance private network id. - PrivateNetworkID string `json:"-"` -} - -// DetachPrivateNetwork: remove load balancer of private network. +// DetachPrivateNetwork: Remove load balancer of private network. func (s *API) DetachPrivateNetwork(req *DetachPrivateNetworkRequest, opts ...scw.RequestOption) error { var err error @@ -7445,9 +8109,8 @@ func (s *API) DetachPrivateNetwork(req *DetachPrivateNetworkRequest, opts ...scw } scwReq := &scw.ScalewayRequest{ - Method: "POST", - Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks/" + fmt.Sprint(req.PrivateNetworkID) + "/detach", - Headers: http.Header{}, + Method: "POST", + Path: "/lb/v1/regions/" + fmt.Sprint(req.Region) + "/lbs/" + fmt.Sprint(req.LBID) + "/private-networks/" + fmt.Sprint(req.PrivateNetworkID) + "/detach", } err = scwReq.SetBody(req) @@ -7461,212 +8124,3 @@ func (s *API) DetachPrivateNetwork(req *DetachPrivateNetworkRequest, opts ...scw } return nil } - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListLBsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListLBsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListLBsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.LBs = append(r.LBs, results.LBs...) 
- r.TotalCount += uint32(len(results.LBs)) - return uint32(len(results.LBs)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListIPsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListIPsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListIPsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.IPs = append(r.IPs, results.IPs...) - r.TotalCount += uint32(len(results.IPs)) - return uint32(len(results.IPs)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListBackendsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListBackendsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListBackendsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Backends = append(r.Backends, results.Backends...) - r.TotalCount += uint32(len(results.Backends)) - return uint32(len(results.Backends)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListFrontendsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListFrontendsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListFrontendsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Frontends = append(r.Frontends, results.Frontends...) - r.TotalCount += uint32(len(results.Frontends)) - return uint32(len(results.Frontends)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListRoutesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListRoutesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListRoutesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Routes = append(r.Routes, results.Routes...) - r.TotalCount += uint32(len(results.Routes)) - return uint32(len(results.Routes)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListBackendStatsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListBackendStatsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListBackendStatsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.BackendServersStats = append(r.BackendServersStats, results.BackendServersStats...) - r.TotalCount += uint32(len(results.BackendServersStats)) - return uint32(len(results.BackendServersStats)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListACLResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListACLResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListACLResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.ACLs = append(r.ACLs, results.ACLs...) 
- r.TotalCount += uint32(len(results.ACLs)) - return uint32(len(results.ACLs)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListCertificatesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListCertificatesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListCertificatesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Certificates = append(r.Certificates, results.Certificates...) - r.TotalCount += uint32(len(results.Certificates)) - return uint32(len(results.Certificates)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListLBTypesResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListLBTypesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListLBTypesResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.LBTypes = append(r.LBTypes, results.LBTypes...) - r.TotalCount += uint32(len(results.LBTypes)) - return uint32(len(results.LBTypes)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListSubscriberResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListSubscriberResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListSubscriberResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.Subscribers = append(r.Subscribers, results.Subscribers...) - r.TotalCount += uint32(len(results.Subscribers)) - return uint32(len(results.Subscribers)), nil -} - -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListLBPrivateNetworksResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount -} - -// UnsafeAppend should not be used -// Internal usage only -func (r *ListLBPrivateNetworksResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListLBPrivateNetworksResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } - - r.PrivateNetwork = append(r.PrivateNetwork, results.PrivateNetwork...) - r.TotalCount += uint32(len(results.PrivateNetwork)) - return uint32(len(results.PrivateNetwork)), nil -} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go index b7a2311f88..b430fe8b00 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_sdk.go @@ -39,80 +39,22 @@ var ( _ = namegenerator.GetRandomName ) -// API: marketplace API. -type API struct { - client *scw.Client -} - -// NewAPI returns a API object from a Scaleway client. -func NewAPI(client *scw.Client) *API { - return &API{ - client: client, - } -} - -type GetImageResponse struct { - Image *Image `json:"image"` -} - -type GetVersionResponse struct { - Version *Version `json:"version"` -} - -// Image: image. -type Image struct { - // ID: UUID of this image. - ID string `json:"id"` - // Name: name of the image. - Name string `json:"name"` - // Description: text description of this image. 
- Description string `json:"description"` - // Logo: URL of this image's logo. - Logo string `json:"logo"` - // Categories: list of categories this image belongs to. - Categories []string `json:"categories"` - // CreationDate: creation date of this image. - CreationDate *time.Time `json:"creation_date"` - // ModificationDate: date of the last modification of this image. - ModificationDate *time.Time `json:"modification_date"` - // ValidUntil: expiration date of this image. - ValidUntil *time.Time `json:"valid_until"` - // Label: label of this image. - // Typically an identifier for a distribution (ex. "ubuntu_focal"). - Label string `json:"label"` - // Versions: list of versions of this image. - Versions []*Version `json:"versions"` - // Organization: organization this image belongs to. - Organization *Organization `json:"organization"` - - CurrentPublicVersion string `json:"current_public_version"` -} - -type ListImagesResponse struct { - Images []*Image `json:"images"` - - TotalCount uint32 `json:"total_count"` -} - -type ListVersionsResponse struct { - Versions []*Version `json:"versions"` - - TotalCount uint32 `json:"total_count"` -} - // LocalImage: local image. type LocalImage struct { - // ID: UUID of this local image. - // Version you will typically use to define an image in an API call. + // ID: version you will typically use to define an image in an API call. ID string `json:"id"` + // CompatibleCommercialTypes: list of all commercial types that are compatible with this local image. CompatibleCommercialTypes []string `json:"compatible_commercial_types"` + // Arch: supported architecture for this local image. Arch string `json:"arch"` + // Zone: availability Zone where this local image is available. Zone scw.Zone `json:"zone"` } +// Organization: organization. type Organization struct { ID string `json:"id"` @@ -123,138 +65,83 @@ type Organization struct { type Version struct { // ID: UUID of this version. ID string `json:"id"` + // Name: name of this version. Name string `json:"name"` + // CreationDate: creation date of this image version. CreationDate *time.Time `json:"creation_date"` + // ModificationDate: date of the last modification of this version. ModificationDate *time.Time `json:"modification_date"` + // LocalImages: list of local images available in this version. LocalImages []*LocalImage `json:"local_images"` } -// Service API +// Image: image. +type Image struct { + // ID: UUID of this image. + ID string `json:"id"` -type ListImagesRequest struct { - // PerPage: a positive integer lower or equal to 100 to select the number of items to display. - PerPage *uint32 `json:"-"` - // Page: a positive integer to choose the page to display. - Page *int32 `json:"-"` -} - -// ListImages: list marketplace images. -func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*ListImagesResponse, error) { - var err error - - defaultPerPage, exist := s.client.GetDefaultPageSize() - if (req.PerPage == nil || *req.PerPage == 0) && exist { - req.PerPage = &defaultPerPage - } - - query := url.Values{} - parameter.AddToQuery(query, "per_page", req.PerPage) - parameter.AddToQuery(query, "page", req.Page) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v1/images", - Query: query, - Headers: http.Header{}, - } - - var resp ListImagesResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil + // Name: name of the image. 
+ Name string `json:"name"` + + // Description: text description of this image. + Description string `json:"description"` + + // Logo: URL of this image's logo. + Logo string `json:"logo"` + + // Categories: list of categories this image belongs to. + Categories []string `json:"categories"` + + // CreationDate: creation date of this image. + CreationDate *time.Time `json:"creation_date"` + + // ModificationDate: date of the last modification of this image. + ModificationDate *time.Time `json:"modification_date"` + + // ValidUntil: expiration date of this image. + ValidUntil *time.Time `json:"valid_until"` + + // Label: typically an identifier for a distribution (ex. "ubuntu_focal"). + Label string `json:"label"` + + // Versions: list of versions of this image. + Versions []*Version `json:"versions"` + + // Organization: organization this image belongs to. + Organization *Organization `json:"organization"` + + CurrentPublicVersion string `json:"current_public_version"` } +// GetImageRequest: get image request. type GetImageRequest struct { // ImageID: display the image name. ImageID string `json:"-"` } -// GetImage: get a specific marketplace image. -func (s *API) GetImage(req *GetImageRequest, opts ...scw.RequestOption) (*GetImageResponse, error) { - var err error - - if fmt.Sprint(req.ImageID) == "" { - return nil, errors.New("field ImageID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v1/images/" + fmt.Sprint(req.ImageID) + "", - Headers: http.Header{}, - } - - var resp GetImageResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil +// GetImageResponse: get image response. +type GetImageResponse struct { + Image *Image `json:"image"` } -type ListVersionsRequest struct { - ImageID string `json:"-"` +// ListImagesRequest: list images request. +type ListImagesRequest struct { + // PerPage: a positive integer lower or equal to 100 to select the number of items to display. + PerPage *uint32 `json:"-"` + + // Page: a positive integer to choose the page to display. + Page *int32 `json:"-"` } -func (s *API) ListVersions(req *ListVersionsRequest, opts ...scw.RequestOption) (*ListVersionsResponse, error) { - var err error +// ListImagesResponse: list images response. +type ListImagesResponse struct { + Images []*Image `json:"images"` - if fmt.Sprint(req.ImageID) == "" { - return nil, errors.New("field ImageID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v1/images/" + fmt.Sprint(req.ImageID) + "/versions", - Headers: http.Header{}, - } - - var resp ListVersionsResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type GetVersionRequest struct { - ImageID string `json:"-"` - - VersionID string `json:"-"` -} - -func (s *API) GetVersion(req *GetVersionRequest, opts ...scw.RequestOption) (*GetVersionResponse, error) { - var err error - - if fmt.Sprint(req.ImageID) == "" { - return nil, errors.New("field ImageID cannot be empty in request") - } - - if fmt.Sprint(req.VersionID) == "" { - return nil, errors.New("field VersionID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v1/images/" + fmt.Sprint(req.ImageID) + "/versions/" + fmt.Sprint(req.VersionID) + "", - Headers: http.Header{}, - } - - var resp GetVersionResponse - - err = s.client.Do(scwReq, &resp, opts...) 
- if err != nil { - return nil, err - } - return &resp, nil + TotalCount uint32 `json:"total_count"` } // UnsafeGetTotalCount should not be used @@ -275,3 +162,60 @@ func (r *ListImagesResponse) UnsafeAppend(res interface{}) (uint32, error) { r.TotalCount += uint32(len(results.Images)) return uint32(len(results.Images)), nil } + +// Marketplace API. +type API struct { + client *scw.Client +} + +// NewAPI returns a API object from a Scaleway client. +func NewAPI(client *scw.Client) *API { + return &API{ + client: client, + } +} + +// ListImages: List marketplace images. +func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*ListImagesResponse, error) { + var err error + + query := url.Values{} + parameter.AddToQuery(query, "per_page", req.PerPage) + parameter.AddToQuery(query, "page", req.Page) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v1/images", + Query: query, + } + + var resp ListImagesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetImage: Get a specific marketplace image. +func (s *API) GetImage(req *GetImageRequest, opts ...scw.RequestOption) (*GetImageResponse, error) { + var err error + + if fmt.Sprint(req.ImageID) == "" { + return nil, errors.New("field ImageID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v1/images/" + fmt.Sprint(req.ImageID) + "", + } + + var resp GetImageResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_utils.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_utils.go index 7d999832b6..f84175e49e 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_utils.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v1/marketplace_utils.go @@ -91,9 +91,3 @@ func (s *API) GetLocalImageIDByLabel(req *GetLocalImageIDByLabelRequest, opts .. func (r *ListImagesResponse) UnsafeSetTotalCount(totalCount int) { r.TotalCount = uint32(totalCount) } - -// UnsafeSetTotalCount should not be used -// Internal usage only -func (r *ListVersionsResponse) UnsafeSetTotalCount(totalCount int) { - r.TotalCount = uint32(totalCount) -} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v2/marketplace_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v2/marketplace_sdk.go index 48ff058b61..003547c3a7 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v2/marketplace_sdk.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/marketplace/v2/marketplace_sdk.go @@ -39,18 +39,6 @@ var ( _ = namegenerator.GetRandomName ) -// API: marketplace API. -type API struct { - client *scw.Client -} - -// NewAPI returns a API object from a Scaleway client. -func NewAPI(client *scw.Client) *API { - return &API{ - client: client, - } -} - type ListImagesRequestOrderBy string const ( @@ -148,7 +136,7 @@ func (enum *ListVersionsRequestOrderBy) UnmarshalJSON(data []byte) error { type LocalImageType string const ( - // Unspecified image type + // Unspecified image type. LocalImageTypeUnknownType = LocalImageType("unknown_type") // An image type that can be used to create volumes which are managed via the Instance API. 
LocalImageTypeInstanceLocal = LocalImageType("instance_local") @@ -179,6 +167,7 @@ func (enum *LocalImageType) UnmarshalJSON(data []byte) error { return nil } +// Category: category. type Category struct { ID string `json:"id"` @@ -191,62 +180,49 @@ type Category struct { type Image struct { // ID: UUID of this image. ID string `json:"id"` + // Name: name of the image. Name string `json:"name"` + // Description: text description of this image. Description string `json:"description"` + // Logo: URL of this image's logo. Logo string `json:"logo"` + // Categories: list of categories this image belongs to. Categories []string `json:"categories"` + // CreatedAt: creation date of this image. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date of the last modification of this image. UpdatedAt *time.Time `json:"updated_at"` + // ValidUntil: expiration date of this image. ValidUntil *time.Time `json:"valid_until"` - // Label: label of this image. - // Typically an identifier for a distribution (ex. "ubuntu_focal"). + + // Label: typically an identifier for a distribution (ex. "ubuntu_focal"). Label string `json:"label"` } -type ListCategoriesResponse struct { - Categories []*Category `json:"categories"` - - TotalCount uint32 `json:"total_count"` -} - -type ListImagesResponse struct { - Images []*Image `json:"images"` - - TotalCount uint32 `json:"total_count"` -} - -type ListLocalImagesResponse struct { - LocalImages []*LocalImage `json:"local_images"` - - TotalCount uint32 `json:"total_count"` -} - -type ListVersionsResponse struct { - Versions []*Version `json:"versions"` - - TotalCount uint32 `json:"total_count"` -} - // LocalImage: local image. type LocalImage struct { - // ID: UUID of this local image. - // Version you will typically use to define an image in an API call. + // ID: version you will typically use to define an image in an API call. ID string `json:"id"` + // CompatibleCommercialTypes: list of all commercial types that are compatible with this local image. CompatibleCommercialTypes []string `json:"compatible_commercial_types"` + // Arch: supported architecture for this local image. Arch string `json:"arch"` + // Zone: availability Zone where this local image is available. Zone scw.Zone `json:"zone"` + // Label: image label this image belongs to. Label string `json:"label"` + // Type: type of this local image. // Default value: unknown_type Type LocalImageType `json:"type"` @@ -256,316 +232,101 @@ type LocalImage struct { type Version struct { // ID: UUID of this version. ID string `json:"id"` + // Name: name of this version. Name string `json:"name"` + // CreatedAt: creation date of this image version. CreatedAt *time.Time `json:"created_at"` + // UpdatedAt: date of the last modification of this version. UpdatedAt *time.Time `json:"updated_at"` + // PublishedAt: date this version was officially published. PublishedAt *time.Time `json:"published_at"` } -// Service API - -type ListImagesRequest struct { - // PageSize: a positive integer lower or equal to 100 to select the number of items to display. - PageSize *uint32 `json:"-"` - // Page: a positive integer to choose the page to display. - Page *int32 `json:"-"` - // OrderBy: ordering to use. - // Default value: name_asc - OrderBy ListImagesRequestOrderBy `json:"-"` - // Arch: choose for which machine architecture to return images. - Arch *string `json:"-"` - // Category: choose the category of images to get. - Category *string `json:"-"` - // IncludeEol: choose to include end-of-life images. 
- IncludeEol bool `json:"-"` -} - -// ListImages: list marketplace images. -// List all available images on the marketplace, their UUID, CPU architecture and description. -func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*ListImagesResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "arch", req.Arch) - parameter.AddToQuery(query, "category", req.Category) - parameter.AddToQuery(query, "include_eol", req.IncludeEol) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v2/images", - Query: query, - Headers: http.Header{}, - } - - var resp ListImagesResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil +// GetCategoryRequest: get category request. +type GetCategoryRequest struct { + CategoryID string `json:"-"` } +// GetImageRequest: get image request. type GetImageRequest struct { // ImageID: display the image name. ImageID string `json:"-"` } -// GetImage: get a specific marketplace image. -// Get detailed information about a marketplace image, specified by its `image_id` (UUID format). -func (s *API) GetImage(req *GetImageRequest, opts ...scw.RequestOption) (*Image, error) { - var err error - - if fmt.Sprint(req.ImageID) == "" { - return nil, errors.New("field ImageID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v2/images/" + fmt.Sprint(req.ImageID) + "", - Headers: http.Header{}, - } - - var resp Image - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type ListVersionsRequest struct { - ImageID string `json:"-"` - - PageSize *uint32 `json:"-"` - - Page *int32 `json:"-"` - // OrderBy: default value: created_at_asc - OrderBy ListVersionsRequestOrderBy `json:"-"` -} - -// ListVersions: list versions of an Image. -// Get a list of all available version of an image, specified by its `image_id` (UUID format). -func (s *API) ListVersions(req *ListVersionsRequest, opts ...scw.RequestOption) (*ListVersionsResponse, error) { - var err error - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "image_id", req.ImageID) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "order_by", req.OrderBy) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v2/versions", - Query: query, - Headers: http.Header{}, - } - - var resp ListVersionsResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type GetVersionRequest struct { - VersionID string `json:"-"` -} - -// GetVersion: get a specific image version. -// Get information such as the name, creation date, last update and published date for an image version specified by its `version_id` (UUID format). 
-func (s *API) GetVersion(req *GetVersionRequest, opts ...scw.RequestOption) (*Version, error) { - var err error - - if fmt.Sprint(req.VersionID) == "" { - return nil, errors.New("field VersionID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v2/versions/" + fmt.Sprint(req.VersionID) + "", - Headers: http.Header{}, - } - - var resp Version - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - -type ListLocalImagesRequest struct { - ImageID *string `json:"-"` - - VersionID *string `json:"-"` - - PageSize *uint32 `json:"-"` - - Page *int32 `json:"-"` - // OrderBy: default value: created_at_asc - OrderBy ListLocalImagesRequestOrderBy `json:"-"` - - ImageLabel *string `json:"-"` - - Zone *scw.Zone `json:"-"` - // Type: default value: unknown_type - Type LocalImageType `json:"-"` -} - -// ListLocalImages: list local images from a specific image or version. -// List information about local images in a specific Availability Zone, specified by its `image_id` (UUID format), `version_id` (UUID format) or `image_label`. Only one of these three parameters may be set. -func (s *API) ListLocalImages(req *ListLocalImagesRequest, opts ...scw.RequestOption) (*ListLocalImagesResponse, error) { - var err error - - defaultZone, exist := s.client.GetDefaultZone() - if (req.Zone == nil || *req.Zone == "") && exist { - req.Zone = &defaultZone - } - - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "image_id", req.ImageID) - parameter.AddToQuery(query, "version_id", req.VersionID) - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - parameter.AddToQuery(query, "order_by", req.OrderBy) - parameter.AddToQuery(query, "image_label", req.ImageLabel) - parameter.AddToQuery(query, "zone", req.Zone) - parameter.AddToQuery(query, "type", req.Type) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v2/local-images", - Query: query, - Headers: http.Header{}, - } - - var resp ListLocalImagesResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil -} - +// GetLocalImageRequest: get local image request. type GetLocalImageRequest struct { LocalImageID string `json:"-"` } -// GetLocalImage: get a specific local image by ID. -// Get detailed information about a local image, including compatible commercial types, supported architecture, labels and the Availability Zone of the image, specified by its `local_image_id` (UUID format). -func (s *API) GetLocalImage(req *GetLocalImageRequest, opts ...scw.RequestOption) (*LocalImage, error) { - var err error - - if fmt.Sprint(req.LocalImageID) == "" { - return nil, errors.New("field LocalImageID cannot be empty in request") - } - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v2/local-images/" + fmt.Sprint(req.LocalImageID) + "", - Headers: http.Header{}, - } - - var resp LocalImage - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil +// GetVersionRequest: get version request. +type GetVersionRequest struct { + VersionID string `json:"-"` } +// ListCategoriesRequest: list categories request. 
type ListCategoriesRequest struct { PageSize *uint32 `json:"-"` Page *int32 `json:"-"` } -// ListCategories: list existing image categories. -// Get a list of all existing categories. The output can be paginated. -func (s *API) ListCategories(req *ListCategoriesRequest, opts ...scw.RequestOption) (*ListCategoriesResponse, error) { - var err error +// ListCategoriesResponse: list categories response. +type ListCategoriesResponse struct { + Categories []*Category `json:"categories"` - defaultPageSize, exist := s.client.GetDefaultPageSize() - if (req.PageSize == nil || *req.PageSize == 0) && exist { - req.PageSize = &defaultPageSize - } - - query := url.Values{} - parameter.AddToQuery(query, "page_size", req.PageSize) - parameter.AddToQuery(query, "page", req.Page) - - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v2/categories", - Query: query, - Headers: http.Header{}, - } - - var resp ListCategoriesResponse - - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil + TotalCount uint32 `json:"total_count"` } -type GetCategoryRequest struct { - CategoryID string `json:"-"` +// UnsafeGetTotalCount should not be used +// Internal usage only +func (r *ListCategoriesResponse) UnsafeGetTotalCount() uint32 { + return r.TotalCount } -// GetCategory: get a specific category. -// Get information about a specific category of the marketplace catalog, specified by its `category_id` (UUID format). -func (s *API) GetCategory(req *GetCategoryRequest, opts ...scw.RequestOption) (*Category, error) { - var err error - - if fmt.Sprint(req.CategoryID) == "" { - return nil, errors.New("field CategoryID cannot be empty in request") +// UnsafeAppend should not be used +// Internal usage only +func (r *ListCategoriesResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListCategoriesResponse) + if !ok { + return 0, errors.New("%T type cannot be appended to type %T", res, r) } - scwReq := &scw.ScalewayRequest{ - Method: "GET", - Path: "/marketplace/v2/categories/" + fmt.Sprint(req.CategoryID) + "", - Headers: http.Header{}, - } + r.Categories = append(r.Categories, results.Categories...) + r.TotalCount += uint32(len(results.Categories)) + return uint32(len(results.Categories)), nil +} - var resp Category +// ListImagesRequest: list images request. +type ListImagesRequest struct { + // PageSize: a positive integer lower or equal to 100 to select the number of items to display. + PageSize *uint32 `json:"-"` - err = s.client.Do(scwReq, &resp, opts...) - if err != nil { - return nil, err - } - return &resp, nil + // Page: a positive integer to choose the page to display. + Page *int32 `json:"-"` + + // OrderBy: ordering to use. + // Default value: name_asc + OrderBy ListImagesRequestOrderBy `json:"-"` + + // Arch: choose for which machine architecture to return images. + Arch *string `json:"-"` + + // Category: choose the category of images to get. + Category *string `json:"-"` + + // IncludeEol: choose to include end-of-life images. + IncludeEol bool `json:"-"` +} + +// ListImagesResponse: list images response. 
+type ListImagesResponse struct { + Images []*Image `json:"images"` + + TotalCount uint32 `json:"total_count"` } // UnsafeGetTotalCount should not be used @@ -587,23 +348,36 @@ func (r *ListImagesResponse) UnsafeAppend(res interface{}) (uint32, error) { return uint32(len(results.Images)), nil } -// UnsafeGetTotalCount should not be used -// Internal usage only -func (r *ListVersionsResponse) UnsafeGetTotalCount() uint32 { - return r.TotalCount +// ListLocalImagesRequest: list local images request. +type ListLocalImagesRequest struct { + // Precisely one of ImageID, VersionID, ImageLabel must be set. + ImageID *string `json:"image_id,omitempty"` + + // Precisely one of ImageID, VersionID, ImageLabel must be set. + VersionID *string `json:"version_id,omitempty"` + + PageSize *uint32 `json:"-"` + + Page *int32 `json:"-"` + + // OrderBy: default value: created_at_asc + OrderBy ListLocalImagesRequestOrderBy `json:"-"` + + // Precisely one of ImageID, VersionID, ImageLabel must be set. + ImageLabel *string `json:"image_label,omitempty"` + + // Zone: zone to target. If none is passed will use default zone from the config. + Zone *scw.Zone `json:"-"` + + // Type: default value: unknown_type + Type LocalImageType `json:"-"` } -// UnsafeAppend should not be used -// Internal usage only -func (r *ListVersionsResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListVersionsResponse) - if !ok { - return 0, errors.New("%T type cannot be appended to type %T", res, r) - } +// ListLocalImagesResponse: list local images response. +type ListLocalImagesResponse struct { + LocalImages []*LocalImage `json:"local_images"` - r.Versions = append(r.Versions, results.Versions...) - r.TotalCount += uint32(len(results.Versions)) - return uint32(len(results.Versions)), nil + TotalCount uint32 `json:"total_count"` } // UnsafeGetTotalCount should not be used @@ -625,21 +399,268 @@ func (r *ListLocalImagesResponse) UnsafeAppend(res interface{}) (uint32, error) return uint32(len(results.LocalImages)), nil } +// ListVersionsRequest: list versions request. +type ListVersionsRequest struct { + ImageID string `json:"-"` + + PageSize *uint32 `json:"-"` + + Page *int32 `json:"-"` + + // OrderBy: default value: created_at_asc + OrderBy ListVersionsRequestOrderBy `json:"-"` +} + +// ListVersionsResponse: list versions response. +type ListVersionsResponse struct { + Versions []*Version `json:"versions"` + + TotalCount uint32 `json:"total_count"` +} + // UnsafeGetTotalCount should not be used // Internal usage only -func (r *ListCategoriesResponse) UnsafeGetTotalCount() uint32 { +func (r *ListVersionsResponse) UnsafeGetTotalCount() uint32 { return r.TotalCount } // UnsafeAppend should not be used // Internal usage only -func (r *ListCategoriesResponse) UnsafeAppend(res interface{}) (uint32, error) { - results, ok := res.(*ListCategoriesResponse) +func (r *ListVersionsResponse) UnsafeAppend(res interface{}) (uint32, error) { + results, ok := res.(*ListVersionsResponse) if !ok { return 0, errors.New("%T type cannot be appended to type %T", res, r) } - r.Categories = append(r.Categories, results.Categories...) - r.TotalCount += uint32(len(results.Categories)) - return uint32(len(results.Categories)), nil + r.Versions = append(r.Versions, results.Versions...) + r.TotalCount += uint32(len(results.Versions)) + return uint32(len(results.Versions)), nil +} + +type API struct { + client *scw.Client +} + +// NewAPI returns a API object from a Scaleway client. 
+func NewAPI(client *scw.Client) *API { + return &API{ + client: client, + } +} + +// ListImages: List all available images on the marketplace, their UUID, CPU architecture and description. +func (s *API) ListImages(req *ListImagesRequest, opts ...scw.RequestOption) (*ListImagesResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "arch", req.Arch) + parameter.AddToQuery(query, "category", req.Category) + parameter.AddToQuery(query, "include_eol", req.IncludeEol) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v2/images", + Query: query, + } + + var resp ListImagesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetImage: Get detailed information about a marketplace image, specified by its `image_id` (UUID format). +func (s *API) GetImage(req *GetImageRequest, opts ...scw.RequestOption) (*Image, error) { + var err error + + if fmt.Sprint(req.ImageID) == "" { + return nil, errors.New("field ImageID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v2/images/" + fmt.Sprint(req.ImageID) + "", + } + + var resp Image + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListVersions: Get a list of all available version of an image, specified by its `image_id` (UUID format). +func (s *API) ListVersions(req *ListVersionsRequest, opts ...scw.RequestOption) (*ListVersionsResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "image_id", req.ImageID) + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "order_by", req.OrderBy) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v2/versions", + Query: query, + } + + var resp ListVersionsResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetVersion: Get information such as the name, creation date, last update and published date for an image version specified by its `version_id` (UUID format). +func (s *API) GetVersion(req *GetVersionRequest, opts ...scw.RequestOption) (*Version, error) { + var err error + + if fmt.Sprint(req.VersionID) == "" { + return nil, errors.New("field VersionID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v2/versions/" + fmt.Sprint(req.VersionID) + "", + } + + var resp Version + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListLocalImages: List information about local images in a specific Availability Zone, specified by its `image_id` (UUID format), `version_id` (UUID format) or `image_label`. Only one of these three parameters may be set. 
+func (s *API) ListLocalImages(req *ListLocalImagesRequest, opts ...scw.RequestOption) (*ListLocalImagesResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + defaultZone, exist := s.client.GetDefaultZone() + if (req.Zone == nil || *req.Zone == "") && exist { + req.Zone = &defaultZone + } + + query := url.Values{} + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + parameter.AddToQuery(query, "order_by", req.OrderBy) + parameter.AddToQuery(query, "zone", req.Zone) + parameter.AddToQuery(query, "type", req.Type) + parameter.AddToQuery(query, "image_id", req.ImageID) + parameter.AddToQuery(query, "version_id", req.VersionID) + parameter.AddToQuery(query, "image_label", req.ImageLabel) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v2/local-images", + Query: query, + } + + var resp ListLocalImagesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetLocalImage: Get detailed information about a local image, including compatible commercial types, supported architecture, labels and the Availability Zone of the image, specified by its `local_image_id` (UUID format). +func (s *API) GetLocalImage(req *GetLocalImageRequest, opts ...scw.RequestOption) (*LocalImage, error) { + var err error + + if fmt.Sprint(req.LocalImageID) == "" { + return nil, errors.New("field LocalImageID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v2/local-images/" + fmt.Sprint(req.LocalImageID) + "", + } + + var resp LocalImage + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// ListCategories: Get a list of all existing categories. The output can be paginated. +func (s *API) ListCategories(req *ListCategoriesRequest, opts ...scw.RequestOption) (*ListCategoriesResponse, error) { + var err error + + defaultPageSize, exist := s.client.GetDefaultPageSize() + if (req.PageSize == nil || *req.PageSize == 0) && exist { + req.PageSize = &defaultPageSize + } + + query := url.Values{} + parameter.AddToQuery(query, "page_size", req.PageSize) + parameter.AddToQuery(query, "page", req.Page) + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v2/categories", + Query: query, + } + + var resp ListCategoriesResponse + + err = s.client.Do(scwReq, &resp, opts...) + if err != nil { + return nil, err + } + return &resp, nil +} + +// GetCategory: Get information about a specific category of the marketplace catalog, specified by its `category_id` (UUID format). +func (s *API) GetCategory(req *GetCategoryRequest, opts ...scw.RequestOption) (*Category, error) { + var err error + + if fmt.Sprint(req.CategoryID) == "" { + return nil, errors.New("field CategoryID cannot be empty in request") + } + + scwReq := &scw.ScalewayRequest{ + Method: "GET", + Path: "/marketplace/v2/categories/" + fmt.Sprint(req.CategoryID) + "", + } + + var resp Category + + err = s.client.Do(scwReq, &resp, opts...) 
+ if err != nil { + return nil, err + } + return &resp, nil } diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/api/std/std_sdk.go b/vendor/github.com/scaleway/scaleway-sdk-go/api/std/std_sdk.go new file mode 100644 index 0000000000..1086911482 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-sdk-go/api/std/std_sdk.go @@ -0,0 +1,71 @@ +// This file was automatically generated. DO NOT EDIT. +// If you have any remark or suggestion do not hesitate to open an issue. + +// Package std provides methods and message types of the std API. +package std + +import ( + "bytes" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/scaleway/scaleway-sdk-go/internal/marshaler" + "github.com/scaleway/scaleway-sdk-go/internal/parameter" + "github.com/scaleway/scaleway-sdk-go/namegenerator" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +// always import dependencies +var ( + _ fmt.Stringer + _ json.Unmarshaler + _ url.URL + _ net.IP + _ http.Header + _ bytes.Reader + _ time.Time + _ = strings.Join + + _ scw.ScalewayRequest + _ marshaler.Duration + _ scw.File + _ = parameter.AddToQuery + _ = namegenerator.GetRandomName +) + +type LanguageCode string + +const ( + LanguageCodeUnknownLanguageCode = LanguageCode("unknown_language_code") + LanguageCodeEnUS = LanguageCode("en_US") + LanguageCodeFrFR = LanguageCode("fr_FR") + LanguageCodeDeDE = LanguageCode("de_DE") +) + +func (enum LanguageCode) String() string { + if enum == "" { + // return default value if empty + return "unknown_language_code" + } + return string(enum) +} + +func (enum LanguageCode) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, enum)), nil +} + +func (enum *LanguageCode) UnmarshalJSON(data []byte) error { + tmp := "" + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + *enum = LanguageCode(LanguageCode(tmp).String()) + return nil +} diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/internal/parameter/query.go b/vendor/github.com/scaleway/scaleway-sdk-go/internal/parameter/query.go index d28979e856..4bbfd20b66 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/internal/parameter/query.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/internal/parameter/query.go @@ -6,6 +6,8 @@ import ( "net/url" "reflect" "time" + + "github.com/scaleway/scaleway-sdk-go/scw" ) // AddToQuery add a key/value pair to an URL query @@ -24,12 +26,16 @@ func AddToQuery(query url.Values, key string, value interface{}) { switch { case elemType == reflect.TypeOf(net.IP{}): query.Add(key, value.(*net.IP).String()) + case elemType == reflect.TypeOf(net.IPNet{}): + query.Add(key, value.(*net.IPNet).String()) + case elemType == reflect.TypeOf(scw.IPNet{}): + query.Add(key, value.(*scw.IPNet).String()) case elemType.Kind() == reflect.Slice: for i := 0; i < elemValue.Len(); i++ { query.Add(key, fmt.Sprint(elemValue.Index(i).Interface())) } case elemType == reflect.TypeOf(time.Time{}): - query.Add(key, value.(time.Time).Format(time.RFC3339)) + query.Add(key, value.(*time.Time).Format(time.RFC3339)) default: query.Add(key, fmt.Sprint(elemValue.Interface())) } diff --git a/vendor/github.com/scaleway/scaleway-sdk-go/scw/custom_types.go b/vendor/github.com/scaleway/scaleway-sdk-go/scw/custom_types.go index 9483fee6ef..673a598eb7 100644 --- a/vendor/github.com/scaleway/scaleway-sdk-go/scw/custom_types.go +++ b/vendor/github.com/scaleway/scaleway-sdk-go/scw/custom_types.go @@ -43,6 +43,28 @@ type File struct { Content io.Reader `json:"content"` } +func (f *File) 
MarshalJSON() ([]byte, error) { + buf := new(bytes.Buffer) + if f.Content != nil { + _, err := io.Copy(buf, f.Content) + if err != nil { + return nil, err + } + } + + tmpFile := struct { + Name string `json:"name"` + ContentType string `json:"content_type"` + Content string `json:"content"` + }{ + Name: f.Name, + ContentType: f.ContentType, + Content: buf.String(), + } + + return json.Marshal(tmpFile) +} + func (f *File) UnmarshalJSON(b []byte) error { type file File var tmpFile struct { @@ -305,6 +327,15 @@ func (d *Duration) UnmarshalJSON(b []byte) error { return nil } +func NewDurationFromTimeDuration(t time.Duration) *Duration { + duration := Duration{ + Seconds: int64(t.Seconds()), + } + duration.Nanos = int32(t.Nanoseconds() - (time.Duration(duration.Seconds) * time.Second).Nanoseconds()) + + return &duration +} + // splitFloatString splits a float represented in a string, and returns its units (left-coma part) and nanos (right-coma part). // E.g.: // "3" ==> units = 3 | nanos = 0 diff --git a/vendor/github.com/spf13/viper/internal/features/bind_struct.go b/vendor/github.com/spf13/viper/internal/features/bind_struct.go new file mode 100644 index 0000000000..89302c2164 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/bind_struct.go @@ -0,0 +1,5 @@ +//go:build viper_bind_struct + +package features + +const BindStruct = true diff --git a/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go b/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go new file mode 100644 index 0000000000..edfaf73b64 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go @@ -0,0 +1,5 @@ +//go:build !viper_bind_struct + +package features + +const BindStruct = false diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 714b2f2ccb..20eb4da177 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -48,6 +48,7 @@ import ( "github.com/spf13/viper/internal/encoding/json" "github.com/spf13/viper/internal/encoding/toml" "github.com/spf13/viper/internal/encoding/yaml" + "github.com/spf13/viper/internal/features" ) // ConfigMarshalError happens when failing to marshal the configuration. @@ -1114,14 +1115,20 @@ func Unmarshal(rawVal any, opts ...DecoderConfigOption) error { } func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { - // TODO: make this optional? - structKeys, err := v.decodeStructKeys(rawVal, opts...) - if err != nil { - return err + keys := v.AllKeys() + + if features.BindStruct { + // TODO: make this optional? + structKeys, err := v.decodeStructKeys(rawVal, opts...) + if err != nil { + return err + } + + keys = append(keys, structKeys...) } // TODO: struct keys should be enough? - return decode(v.getSettings(append(v.AllKeys(), structKeys...)), defaultDecoderConfig(rawVal, opts...)) + return decode(v.getSettings(keys), defaultDecoderConfig(rawVal, opts...)) } func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]string, error) { @@ -1179,7 +1186,20 @@ func (v *Viper) UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { config := defaultDecoderConfig(rawVal, opts...) config.ErrorUnused = true - return decode(v.AllSettings(), config) + keys := v.AllKeys() + + if features.BindStruct { + // TODO: make this optional? + structKeys, err := v.decodeStructKeys(rawVal, opts...) + if err != nil { + return err + } + + keys = append(keys, structKeys...) 
+ } + + // TODO: struct keys should be enough? + return decode(v.getSettings(keys), config) } // BindPFlags binds a full flag set to the configuration, using each flag's long diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 4f506f8791..199c21d27a 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.7 && amd64 && gc && !purego +//go:build amd64 && gc && !purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 353bb7cac5..9ae8206c20 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.7 && amd64 && gc && !purego +//go:build amd64 && gc && !purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go deleted file mode 100644 index 1d0770abba..0000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 && amd64 && gc && !purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - if useSSE4 { - hashBlocksSSE4(h, c, flag, blocks) - } else { - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go index d9fcac3a4d..54e446e1d2 100644 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.9 - package blake2b import ( diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go index c0834c00df..cc0bb7ab64 100644 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -187,9 +187,11 @@ type channel struct { pending *buffer extPending *buffer - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 + // windowMu protects myWindow, the flow-control window, and myConsumed, + // the number of bytes consumed since we last increased myWindow + windowMu sync.Mutex + myWindow uint32 + myConsumed uint32 // writeMu serializes calls to mux.conn.writePacket() and // protects sentClose and packetPool. This mutex must be @@ -332,14 +334,24 @@ func (ch *channel) handleData(packet []byte) error { return nil } -func (c *channel) adjustWindow(n uint32) error { +func (c *channel) adjustWindow(adj uint32) error { c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. 
- c.myWindow += uint32(n) + // Since myConsumed and myWindow are managed on our side, and can never + // exceed the initial window setting, we don't worry about overflow. + c.myConsumed += adj + var sendAdj uint32 + if (channelWindowSize-c.myWindow > 3*c.maxIncomingPayload) || + (c.myWindow < channelWindowSize/2) { + sendAdj = c.myConsumed + c.myConsumed = 0 + c.myWindow += sendAdj + } c.windowMu.Unlock() + if sendAdj == 0 { + return nil + } return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), + AdditionalBytes: sendAdj, }) } diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index bdc356cbdf..fd8c49749e 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -82,7 +82,7 @@ func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan if err := conn.clientHandshake(addr, &fullConf); err != nil { c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %w", err) } conn.mux = newMux(conn.transport) return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 49bbba7692..56cdc7c21c 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -35,6 +35,16 @@ type keyingTransport interface { // direction will be effected if a msgNewKeys message is sent // or received. prepareKeyChange(*algorithms, *kexResult) error + + // setStrictMode sets the strict KEX mode, notably triggering + // sequence number resets on sending or receiving msgNewKeys. + // If the sequence number is already > 1 when setStrictMode + // is called, an error is returned. + setStrictMode() error + + // setInitialKEXDone indicates to the transport that the initial key exchange + // was completed + setInitialKEXDone() } // handshakeTransport implements rekeying on top of a keyingTransport @@ -100,6 +110,10 @@ type handshakeTransport struct { // The session ID or nil if first kex did not complete yet. sessionID []byte + + // strictMode indicates if the other side of the handshake indicated + // that we should be following the strict KEX protocol restrictions. + strictMode bool } type pendingKex struct { @@ -209,7 +223,10 @@ func (t *handshakeTransport) readLoop() { close(t.incoming) break } - if p[0] == msgIgnore || p[0] == msgDebug { + // If this is the first kex, and strict KEX mode is enabled, + // we don't ignore any messages, as they may be used to manipulate + // the packet sequence numbers. + if !(t.sessionID == nil && t.strictMode) && (p[0] == msgIgnore || p[0] == msgDebug) { continue } t.incoming <- p @@ -441,6 +458,11 @@ func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { return successPacket, nil } +const ( + kexStrictClient = "kex-strict-c-v00@openssh.com" + kexStrictServer = "kex-strict-s-v00@openssh.com" +) + // sendKexInit sends a key change message. 
func (t *handshakeTransport) sendKexInit() error { t.mu.Lock() @@ -454,7 +476,6 @@ func (t *handshakeTransport) sendKexInit() error { } msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, CiphersClientServer: t.config.Ciphers, CiphersServerClient: t.config.Ciphers, MACsClientServer: t.config.MACs, @@ -464,6 +485,13 @@ func (t *handshakeTransport) sendKexInit() error { } io.ReadFull(rand.Reader, msg.Cookie[:]) + // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, + // and possibly to add the ext-info extension algorithm. Since the slice may be the + // user owned KeyExchanges, we create our own slice in order to avoid using user + // owned memory by mistake. + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+2) // room for kex-strict and ext-info + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + isServer := len(t.hostKeys) > 0 if isServer { for _, k := range t.hostKeys { @@ -488,17 +516,24 @@ func (t *handshakeTransport) sendKexInit() error { msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) } } + + if t.sessionID == nil { + msg.KexAlgos = append(msg.KexAlgos, kexStrictServer) + } } else { msg.ServerHostKeyAlgos = t.hostKeyAlgorithms // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what // algorithms the server supports for public key authentication. See RFC // 8308, Section 2.1. + // + // We also send the strict KEX mode extension algorithm, in order to opt + // into the strict KEX mode. if firstKeyExchange := t.sessionID == nil; firstKeyExchange { - msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) - msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") + msg.KexAlgos = append(msg.KexAlgos, kexStrictClient) } + } packet := Marshal(msg) @@ -604,6 +639,13 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return err } + if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) { + t.strictMode = true + if err := t.conn.setStrictMode(); err != nil { + return err + } + } + // We don't send FirstKexFollows, but we handle receiving it. // // RFC 4253 section 7 defines the kex and the agreement method for @@ -679,6 +721,12 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return unexpectedMessageError(msgNewKeys, packet[0]) } + if firstKeyExchange { + // Indicates to the transport that the first key exchange is completed + // after receiving SSH_MSG_NEWKEYS. 
+ t.conn.setInitialKEXDone() + } + return nil } diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 7f0c236a9a..c2dfe3268c 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -213,6 +213,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha } else { for _, algo := range fullConf.PublicKeyAuthAlgorithms { if !contains(supportedPubKeyAuthAlgos, algo) { + c.Close() return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo) } } @@ -220,6 +221,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha // Check if the config contains any unsupported key exchanges for _, kex := range fullConf.KeyExchanges { if _, ok := serverForbiddenKexAlgos[kex]; ok { + c.Close() return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) } } diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go index da015801ea..0424d2d37c 100644 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -49,6 +49,9 @@ type transport struct { rand io.Reader isClient bool io.Closer + + strictMode bool + initialKEXDone bool } // packetCipher represents a combination of SSH encryption/MAC @@ -74,6 +77,18 @@ type connectionState struct { pendingKeyChange chan packetCipher } +func (t *transport) setStrictMode() error { + if t.reader.seqNum != 1 { + return errors.New("ssh: sequence number != 1 when strict KEX mode requested") + } + t.strictMode = true + return nil +} + +func (t *transport) setInitialKEXDone() { + t.initialKEXDone = true +} + // prepareKeyChange sets up key material for a keychange. The key changes in // both directions are triggered by reading and writing a msgNewKey packet // respectively. @@ -112,11 +127,12 @@ func (t *transport) printPacket(p []byte, write bool) { // Read and decrypt next packet. 
func (t *transport) readPacket() (p []byte, err error) { for { - p, err = t.reader.readPacket(t.bufReader) + p, err = t.reader.readPacket(t.bufReader, t.strictMode) if err != nil { break } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + // in strict mode we pass through DEBUG and IGNORE packets only during the initial KEX + if len(p) == 0 || (t.strictMode && !t.initialKEXDone) || (p[0] != msgIgnore && p[0] != msgDebug) { break } } @@ -127,7 +143,7 @@ func (t *transport) readPacket() (p []byte, err error) { return p, err } -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { +func (s *connectionState) readPacket(r *bufio.Reader, strictMode bool) ([]byte, error) { packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) s.seqNum++ if err == nil && len(packet) == 0 { @@ -140,6 +156,9 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { select { case cipher := <-s.pendingKeyChange: s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } default: return nil, errors.New("ssh: got bogus newkeys message") } @@ -170,10 +189,10 @@ func (t *transport) writePacket(packet []byte) error { if debugTransport { t.printPacket(packet, true) } - return t.writer.writePacket(t.bufWriter, t.rand, packet) + return t.writer.writePacket(t.bufWriter, t.rand, packet, t.strictMode) } -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte, strictMode bool) error { changeKeys := len(packet) > 0 && packet[0] == msgNewKeys err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) @@ -188,6 +207,9 @@ func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet [] select { case cipher := <-s.pendingKeyChange: s.packetCipher = cipher + if strictMode { + s.seqNum = 0 + } default: panic("ssh: no key material for msgNewKeys") } diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4a..0000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. 
-// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b867937..0000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a904..0000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa5..0000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. 
The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. 
If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a638033..0000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. 
- // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json index d10eb13e86..d5eff67a67 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json @@ -9192,7 +9192,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", @@ -11098,6 +11098,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "addNetworkInterface": { + "description": "Adds a network interface to an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", + "httpMethod": "POST", + "id": "compute.instances.addNetworkInterface", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "The instance name for this request stored as resource_id. 
Name should conform to RFC1035 or be an unsigned long integer.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", + "request": { + "$ref": "InstancesAddNetworkInterfaceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "addResourcePolicies": { "description": "Adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies", @@ -13379,7 +13428,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -13436,7 +13485,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -26215,7 +26264,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. 
The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -40966,7 +41015,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. They do not affect the connection between the load balancer and the backends.", "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslPolicy", @@ -43552,7 +43601,7 @@ } } }, - "revision": "20231031", + "revision": "20231128", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -44807,6 +44856,10 @@ "description": "Whether to enable UEFI networking for instance creation.", "type": "boolean" }, + "enableWatchdogTimer": { + "description": "Whether to enable the watchdog timer.", + "type": "boolean" + }, "numaNodeCount": { "description": "The number of vNUMA nodes.", "format": "int32", @@ -46752,7 +46805,7 @@ "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Lifetime of cookies in seconds. This setting is applicable to Application Load Balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -46787,7 +46840,7 @@ }, "connectionTrackingPolicy": { "$ref": "BackendServiceConnectionTrackingPolicy", - "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing." + "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for external passthrough Network Load Balancers and internal passthrough Network Load Balancers." 
}, "consistentHash": { "$ref": "ConsistentHashLoadBalancerSettings", @@ -46820,18 +46873,22 @@ "type": "string" }, "enableCDN": { - "description": "If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.", + "description": "If true, enables Cloud CDN for the backend service of a global external Application Load Balancer.", "type": "boolean" }, "failoverPolicy": { "$ref": "BackendServiceFailoverPolicy", - "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." + "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." }, "fingerprint": { "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a BackendService.", "format": "byte", "type": "string" }, + "haPolicy": { + "$ref": "BackendServiceHAPolicy", + "description": "Configuring haPolicy is not supported." + }, "healthChecks": { "description": "The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently, at most one health check can be specified for each backend service. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet or serverless NEG backends must not have a health check.", "items": { @@ -46841,7 +46898,7 @@ }, "iap": { "$ref": "BackendServiceIAP", - "description": "The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing." + "description": "The configurations for Identity-Aware Proxy on this resource. Not available for internal passthrough Network Load Balancers and external passthrough Network Load Balancers." }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -46849,7 +46906,7 @@ "type": "string" }, "ipAddressSelectionPolicy": { - "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. 
- PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", + "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced global external Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, - Internal proxy Network Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional internal Application Load Balancer (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", "enum": [ "IPV4_ONLY", "IPV6_ONLY", @@ -46880,10 +46937,10 @@ "INVALID_LOAD_BALANCING_SCHEME" ], "enumDescriptions": [ - "Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing", - "Signifies that this will be used for External Managed HTTP(S) Load Balancing.", - "Signifies that this will be used for Internal TCP/UDP Load Balancing.", - "Signifies that this will be used for Internal HTTP(S) Load Balancing.", + "Signifies that this will be used for classic Application Load Balancers, global external proxy Network Load Balancers, or external passthrough Network Load Balancers.", + "Signifies that this will be used for global external Application Load Balancers, regional external Application Load Balancers, or regional external proxy Network Load Balancers.", + "Signifies that this will be used for internal passthrough Network Load Balancers.", + "Signifies that this will be used for internal Application Load Balancers.", "Signifies that this will be used by Traffic Director.", "" ], @@ -46950,12 +47007,12 @@ }, "port": { "deprecated": true, - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. 
For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port.", "format": "int32", "type": "integer" }, "portName": { - "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.", + "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port_name.", "type": "string" }, "protocol": { @@ -47356,11 +47413,11 @@ "type": "string" }, "enableStrongAffinity": { - "description": "Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly.", + "description": "Enable Strong Session Affinity for external passthrough Network Load Balancers. This option is not available publicly.", "type": "boolean" }, "idleTimeoutSec": { - "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.", + "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For internal passthrough Network Load Balancers: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For external passthrough Network Load Balancers the default is 60 seconds. This option is not available publicly.", "format": "int32", "type": "integer" }, @@ -47382,7 +47439,7 @@ "type": "object" }, "BackendServiceFailoverPolicy": { - "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. 
A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", + "description": "For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", "id": "BackendServiceFailoverPolicy", "properties": { "disableConnectionDrainOnFailover": { @@ -47390,7 +47447,7 @@ "type": "boolean" }, "dropTrafficIfUnhealthy": { - "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", + "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", "type": "boolean" }, "failoverRatio": { @@ -47426,6 +47483,52 @@ }, "type": "object" }, + "BackendServiceHAPolicy": { + "id": "BackendServiceHAPolicy", + "properties": { + "fastIPMove": { + "description": "Enabling fastIPMove is not supported.", + "enum": [ + "DISABLED", + "GARP_RA" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "leader": { + "$ref": "BackendServiceHAPolicyLeader", + "description": "Setting a leader is not supported." + } + }, + "type": "object" + }, + "BackendServiceHAPolicyLeader": { + "id": "BackendServiceHAPolicyLeader", + "properties": { + "backendGroup": { + "description": "Setting backendGroup is not supported.", + "type": "string" + }, + "networkEndpoint": { + "$ref": "BackendServiceHAPolicyLeaderNetworkEndpoint", + "description": "Setting a network endpoint as leader is not supported." 
+ } + }, + "type": "object" + }, + "BackendServiceHAPolicyLeaderNetworkEndpoint": { + "id": "BackendServiceHAPolicyLeaderNetworkEndpoint", + "properties": { + "instance": { + "description": "Specifying the instance name of a leader is not supported.", + "type": "string" + } + }, + "type": "object" + }, "BackendServiceIAP": { "description": "Identity-Aware Proxy", "id": "BackendServiceIAP", @@ -49927,6 +50030,11 @@ "$ref": "DiskResourceStatus", "description": "[Output Only] Status information for the disk resource." }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -52682,7 +52790,7 @@ "type": "object" }, "ForwardingRule": { - "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/alpha/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/alpha/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", + "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/alpha/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/alpha/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", "id": "ForwardingRule", "properties": { "IPAddress": { @@ -52718,7 +52826,7 @@ "type": "boolean" }, "allowGlobalAccess": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the internal passthrough Network Load Balancers, the regional internal Application Load Balancer, and the regional internal proxy Network Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", "type": "boolean" }, "allowPscGlobalAccess": { @@ -52730,11 +52838,11 @@ "type": "boolean" }, "backendService": { - "description": "Identifies the backend service to which the forwarding rule sends traffic. 
Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for internal and external passthrough Network Load Balancers; must be omitted for all other load balancer types.", "type": "string" }, "baseForwardingRule": { - "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + "description": "[Output Only] The URL for the corresponding base forwarding rule. By base forwarding rule, we mean the forwarding rule that has the same IP address, protocol, and port settings with the current forwarding rule, but without sourceIPRanges specified. Always empty if the current forwarding rule does not have sourceIPRanges specified.", "type": "string" }, "creationTimestamp": { @@ -52779,7 +52887,7 @@ }, "kind": { "default": "compute#forwardingRule", - "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for forwarding rule resources.", "type": "string" }, "labelFingerprint": { @@ -52827,7 +52935,7 @@ "type": "string" }, "network": { - "description": "This field is not used for global external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for global external load balancing. For internal passthrough Network Load Balancers, this field identifies the network that the load balanced IP should belong to for this forwarding rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -52864,7 +52972,7 @@ "type": "array" }, "pscConnectionId": { - "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", + "description": "[Output Only] The PSC connection id of the PSC forwarding rule.", "format": "uint64", "type": "string" }, @@ -52907,23 +53015,23 @@ "type": "array" }, "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
This field is only used for internal load balancing.", + "description": "An optional prefix to the service name for this forwarding rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "serviceName": { - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", + "description": "[Output Only] The internal fully qualified service name for this forwarding rule. This field is only used for internal load balancing.", "type": "string" }, "sourceIpRanges": { - "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + "description": "If not empty, this forwarding rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a forwarding rule can only have up to 64 source IP ranges, and this field can only be used with a regional forwarding rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", "items": { "type": "string" }, "type": "array" }, "subnetwork": { - "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", + "description": "This field identifies the subnetwork that the load balanced IP should belong to for this forwarding rule, used with internal load balancers and external passthrough Network Load Balancers with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", "type": "string" }, "target": { @@ -53254,7 +53362,7 @@ "type": "object" }, "ForwardingRuleServiceDirectoryRegistration": { - "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "description": "Describes the auto-registration of the forwarding rule to Service Directory. 
The region and project of the Service Directory resource generated from this registration will be the same as this forwarding rule.", "id": "ForwardingRuleServiceDirectoryRegistration", "properties": { "namespace": { @@ -53266,7 +53374,7 @@ "type": "string" }, "serviceDirectoryRegion": { - "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", + "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs forwarding rules on the same network should use the same Service Directory region.", "type": "string" } }, @@ -53410,12 +53518,12 @@ "id": "FutureReservation", "properties": { "autoCreatedReservationsDeleteTime": { - "description": "Future timestamp when the FR auto-created reservations will be deleted by GCE. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.", + "description": "Future timestamp when the FR auto-created reservations will be deleted by Compute Engine. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.", "type": "string" }, "autoCreatedReservationsDuration": { "$ref": "Duration", - "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by GCE. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution." + "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by Compute Engine. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution." }, "autoDeleteAutoCreatedReservations": { "description": "Setting for enabling or disabling automatic deletion for auto-created reservation. If set to true, auto-created reservations will be deleted at Future Reservation's end time (default) or at user's defined timestamp if any of the [auto_created_reservations_delete_time, auto_created_reservations_duration] values is specified. For keeping auto-created reservation indefinitely, this value should be set to false.", @@ -54174,7 +54282,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. 
Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -54445,7 +54553,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. 
Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -54512,7 +54620,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. 
For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -54579,7 +54687,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -54630,7 +54738,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a health check resource. 
Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/alpha/regionHealthChecks) * [Global](/compute/docs/reference/rest/alpha/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** The following load balancer can use either regional or global health check: * Internal TCP/UDP load balancer The following load balancers require regional health check: * Internal HTTP(S) load balancer * Backend service-based network load balancer Traffic Director and the following load balancers require global health check: * External HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load balancer The following load balancer require [legacy HTTP health checks](/compute/docs/reference/rest/v1/httpHealthChecks): * Target pool-based network load balancer **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", + "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/alpha/regionHealthChecks) * [Global](/compute/docs/reference/rest/alpha/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** Health check requirements vary depending on the type of load balancer. For details about the type of health check supported for each load balancer and corresponding backend type, see Health checks overview: Load balancer guide. **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -56913,6 +57021,11 @@ "$ref": "RolloutPolicy", "description": "A rollout policy to apply to this image. When specified, the rollout policy overrides per-zone references to the image via the associated image family. The rollout policy restricts the zones where this image is accessible when using a zonal image family reference. When the rollout policy does not include the user specified zone, or if the zone is rolled out, this image is accessible. The rollout policy for this image is read-only, except for allowlisted users. This field might not be configured. To view the latest non-deprecated image in a specific zone, use the imageFamilyViews.get method." }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -57408,6 +57521,10 @@ "$ref": "ResourceStatus", "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." }, + "satisfiesPzi": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -58213,6 +58330,10 @@ }, "type": "array" }, + "params": { + "$ref": "InstanceGroupManagerParams", + "description": "Input only. 
Additional params passed with the request, but not persisted as part of resource payload." + }, "region": { "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources).", "type": "string" @@ -58829,6 +58950,20 @@ }, "type": "object" }, + "InstanceGroupManagerParams": { + "description": "Input only additional params for instance group manager creation.", + "id": "InstanceGroupManagerParams", + "properties": { + "resourceManagerTags": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource manager tags to be bound to the instance group manager. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/123`, and values are in the format `tagValues/456`. The field is allowed for INSERT only.", + "type": "object" + } + }, + "type": "object" + }, "InstanceGroupManagerResizeRequest": { "description": "InstanceGroupManagerResizeRequest represents a request to create a number of VMs: either immediately or by queuing the request for the specified time. This resize request is nested under InstanceGroupManager and the VMs created by this request are added to the owning InstanceGroupManager.", "id": "InstanceGroupManagerResizeRequest", @@ -59138,14 +59273,14 @@ "type": "integer" }, "mode": { - "description": "Defines behaviour of using instances from standby pool to resize MIG.", + "description": "Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. The default mode is `MANUAL`.", "enum": [ "MANUAL", "SCALE_OUT_POOL" ], "enumDescriptions": [ - "MIG does not automatically stop/start or suspend/resume VMs.", - "MIG automatically resumes and starts VMs when it scales out, and replenishes the standby pool afterwards." + "MIG does not automatically resume or start VMs in the standby pool when the group scales out.", + "MIG automatically resumes or starts VMs in the standby pool when the group scales out, and replenishes the standby pool afterwards." ], "type": "string" } @@ -61496,6 +61631,16 @@ }, "type": "object" }, + "InstancesAddNetworkInterfaceRequest": { + "id": "InstancesAddNetworkInterfaceRequest", + "properties": { + "network_interface": { + "$ref": "NetworkInterface", + "description": "The new network interface to add." + } + }, + "type": "object" + }, "InstancesAddResourcePoliciesRequest": { "id": "InstancesAddResourcePoliciesRequest", "properties": { @@ -61959,6 +62104,11 @@ "$ref": "InstantSnapshotResourceStatus", "description": "[Output Only] Status information for the instant snapshot resource." }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -65706,6 +65856,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -81536,6 +81691,10 @@ "bfdStatus": { "$ref": "BfdStatus" }, + "enableIpv4": { + "description": "Enable IPv4 traffic over BGP Peer. It is enabled by default if the peerIpAddress is version 4.", + "type": "boolean" + }, "enableIpv6": { "description": "Enable IPv6 traffic over BGP Peer. 
If not specified, it is disabled by default.", "type": "boolean" @@ -81544,6 +81703,10 @@ "description": "IP address of the local BGP interface.", "type": "string" }, + "ipv4NexthopAddress": { + "description": "IPv4 address of the local BGP interface.", + "type": "string" + }, "ipv6NexthopAddress": { "description": "IPv6 address of the local BGP interface.", "type": "string" @@ -81569,6 +81732,10 @@ "description": "IP address of the remote BGP interface.", "type": "string" }, + "peerIpv4NexthopAddress": { + "description": "IPv4 address of the remote BGP interface.", + "type": "string" + }, "peerIpv6NexthopAddress": { "description": "IPv6 address of the remote BGP interface.", "type": "string" @@ -81598,11 +81765,17 @@ "statusReason": { "description": "Indicates why particular status was returned.", "enum": [ + "IPV4_PEER_ON_IPV6_ONLY_CONNECTION", + "IPV6_PEER_ON_IPV4_ONLY_CONNECTION", "MD5_AUTH_INTERNAL_PROBLEM", + "MISSING_NETWORK_CONNECTIVITY_CENTER_SPOKE", "STATUS_REASON_UNSPECIFIED" ], "enumDescriptions": [ + "BGP peer disabled because it requires IPv4 but the underlying connection is IPv6-only.", + "BGP peer disabled because it requires IPv6 but the underlying connection is IPv4-only.", "Indicates internal problems with configuration of MD5 authentication. This particular reason can only be returned when md5AuthEnabled is true and status is DOWN.", + "BGP peer disabled because it is not labeled as an NCC spoke. Currently, BGP peers using SD-WAN connectivity will be disabled for this reason.", "" ], "type": "string" @@ -82289,7 +82462,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. 
USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -83245,10 +83418,58 @@ "format": "float", "type": "number" }, + "detectionAbsoluteQps": { + "format": "float", + "type": "number" + }, + "detectionLoadThreshold": { + "format": "float", + "type": "number" + }, + "detectionRelativeToBaselineQps": { + "format": "float", + "type": "number" + }, "name": { "description": "The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" + }, + "trafficGranularityConfigs": { + "description": "Configuration options for enabling Adaptive Protection to operate on specified granular traffic units.", + "items": { + "$ref": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig": { + "description": "Configurations to specifc granular traffic units processed by Adaptive Protection.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig", + "properties": { + "enableEachUniqueValue": { + "description": "If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if `value` is empty.", + "type": "boolean" + }, + "type": { + "description": "Type of this configuration.", + "enum": [ + "HTTP_HEADER_HOST", + "HTTP_PATH", + "UNSPECIFIED_TYPE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "value": { + "description": "Requests that match this value constitute a granular traffic unit.", + "type": "string" } }, "type": "object" @@ -83987,7 +84208,7 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. 
- HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "ALL_IPS", @@ -83997,6 +84218,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDeprecated": [ @@ -84008,6 +84231,8 @@ false, false, false, + false, + false, false ], "enumDescriptions": [ @@ -84019,6 +84244,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -84061,7 +84288,7 @@ "type": "string" }, "enforceOnKeyType": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. 
the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "ALL_IPS", @@ -84071,6 +84298,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDeprecated": [ @@ -84082,6 +84311,8 @@ false, false, false, + false, + false, false ], "enumDescriptions": [ @@ -84093,6 +84324,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -85411,6 +85644,11 @@ "description": "[Output Only] URL of the region where the snapshot resides. Only applicable for regional snapshots.", "type": "string" }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -87217,7 +87455,7 @@ "type": "object" }, "SslPolicy": { - "description": "Represents an SSL Policy resource. 
Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", + "description": "Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview.", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -89769,7 +90007,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -91656,7 +91894,7 @@ "type": "object" }, "TargetPool": { - "description": "Represents a Target Pool resource. Target pools are used for network TCP/UDP load balancing. 
A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", + "description": "Represents a Target Pool resource. Target pools are used with external passthrough Network Load Balancers. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", "id": "TargetPool", "properties": { "backupPool": { @@ -92323,7 +92561,7 @@ "type": "object" }, "TargetSslProxy": { - "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", + "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetSslProxy", "properties": { "certificateMap": { @@ -92702,7 +92940,7 @@ "type": "object" }, "TargetTcpProxy": { - "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.", + "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target TCP proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetTcpProxy", "properties": { "creationTimestamp": { diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index 38a0a57bec..90af9285dc 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -3068,6 +3068,9 @@ type AdvancedMachineFeatures struct { // creation. EnableUefiNetworking bool `json:"enableUefiNetworking,omitempty"` + // EnableWatchdogTimer: Whether to enable the watchdog timer. + EnableWatchdogTimer bool `json:"enableWatchdogTimer,omitempty"` + // NumaNodeCount: The number of vNUMA nodes. NumaNodeCount int64 `json:"numaNodeCount,omitempty"` @@ -6100,13 +6103,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // For more information, see Backend Services. type BackendService struct { // AffinityCookieTtlSec: Lifetime of cookies in seconds. This setting is - // applicable to external and internal HTTP(S) load balancers and - // Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session - // affinity. If set to 0, the cookie is non-persistent and lasts only - // until the end of the browser session (or equivalent). The maximum - // allowed value is two weeks (1,209,600). Not supported when the - // backend service is referenced by a URL map that is bound to target - // gRPC proxy that has validateForProxyless field set to true. + // applicable to Application Load Balancers and Traffic Director and + // requires GENERATED_COOKIE or HTTP_COOKIE session affinity. 
If set to + // 0, the cookie is non-persistent and lasts only until the end of the + // browser session (or equivalent). The maximum allowed value is two + // weeks (1,209,600). Not supported when the backend service is + // referenced by a URL map that is bound to target gRPC proxy that has + // validateForProxyless field set to true. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -6132,8 +6135,8 @@ type BackendService struct { // ConnectionTrackingPolicy: Connection Tracking configuration for this // BackendService. Connection tracking policy settings are only - // available for Network Load Balancing and Internal TCP/UDP Load - // Balancing. + // available for external passthrough Network Load Balancers and + // internal passthrough Network Load Balancers. ConnectionTrackingPolicy *BackendServiceConnectionTrackingPolicy `json:"connectionTrackingPolicy,omitempty"` // ConsistentHash: Consistent Hash-based load balancing can be used to @@ -6172,15 +6175,15 @@ type BackendService struct { // security policy associated with this backend service. EdgeSecurityPolicy string `json:"edgeSecurityPolicy,omitempty"` - // EnableCDN: If true, enables Cloud CDN for the backend service of an - // external HTTP(S) load balancer. + // EnableCDN: If true, enables Cloud CDN for the backend service of a + // global external Application Load Balancer. EnableCDN bool `json:"enableCDN,omitempty"` // FailoverPolicy: Requires at least one backend instance group to be // defined as a backup (failover) backend. For load balancers that have - // configurable failover: Internal TCP/UDP Load Balancing + // configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` @@ -6193,6 +6196,9 @@ type BackendService struct { // BackendService. Fingerprint string `json:"fingerprint,omitempty"` + // HaPolicy: Configuring haPolicy is not supported. + HaPolicy *BackendServiceHAPolicy `json:"haPolicy,omitempty"` + // HealthChecks: The list of URLs to the healthChecks, httpHealthChecks // (legacy), or httpsHealthChecks (legacy) resource for health checking // this backend service. Not all backend services support legacy health @@ -6204,8 +6210,8 @@ type BackendService struct { HealthChecks []string `json:"healthChecks,omitempty"` // Iap: The configurations for Identity-Aware Proxy on this resource. - // Not available for Internal TCP/UDP Load Balancing and Network Load - // Balancing. + // Not available for internal passthrough Network Load Balancers and + // external passthrough Network Load Balancers. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -6225,13 +6231,13 @@ type BackendService struct { // backend service (Instance Group, Managed Instance Group, Network // Endpoint Group), regardless of traffic from the client to the proxy. // Only IPv6 health checks are used to check the health of the backends. 
- // This field is applicable to either: - Advanced Global External HTTPS - // Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional - // External HTTPS Load Balancing, - Internal TCP Proxy (load balancing - // scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing - // (load balancing scheme INTERNAL_MANAGED), - Traffic Director with - // Envoy proxies and proxyless gRPC (load balancing scheme - // INTERNAL_SELF_MANAGED). + // This field is applicable to either: - Advanced global external + // Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - + // Regional external Application Load Balancer, - Internal proxy Network + // Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional + // internal Application Load Balancer (load balancing scheme + // INTERNAL_MANAGED), - Traffic Director with Envoy proxies and + // proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). // // Possible values: // "IPV4_ONLY" - Only send IPv4 traffic to the backends of the Backend @@ -6259,14 +6265,16 @@ type BackendService struct { // another. For more information, refer to Choosing a load balancer. // // Possible values: - // "EXTERNAL" - Signifies that this will be used for external HTTP(S), - // SSL Proxy, TCP Proxy, or Network Load Balancing - // "EXTERNAL_MANAGED" - Signifies that this will be used for External - // Managed HTTP(S) Load Balancing. - // "INTERNAL" - Signifies that this will be used for Internal TCP/UDP - // Load Balancing. - // "INTERNAL_MANAGED" - Signifies that this will be used for Internal - // HTTP(S) Load Balancing. + // "EXTERNAL" - Signifies that this will be used for classic + // Application Load Balancers, global external proxy Network Load + // Balancers, or external passthrough Network Load Balancers. + // "EXTERNAL_MANAGED" - Signifies that this will be used for global + // external Application Load Balancers, regional external Application + // Load Balancers, or regional external proxy Network Load Balancers. + // "INTERNAL" - Signifies that this will be used for internal + // passthrough Network Load Balancers. + // "INTERNAL_MANAGED" - Signifies that this will be used for internal + // Application Load Balancers. // "INTERNAL_SELF_MANAGED" - Signifies that this will be used by // Traffic Director. // "INVALID_LOAD_BALANCING_SCHEME" @@ -6408,16 +6416,18 @@ type BackendService struct { OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the - // backend. The default value is 80. For Internal TCP/UDP Load Balancing - // and Network Load Balancing, omit port. + // backend. The default value is 80. For internal passthrough Network + // Load Balancers and external passthrough Network Load Balancers, omit + // port. Port int64 `json:"port,omitempty"` // PortName: A named port on a backend instance group representing the // port for communication to the backend VMs in that group. The named // port must be defined on each backend instance group // (https://cloud.google.com/load-balancing/docs/backend-service#named_ports). - // This parameter has no meaning if the backends are NEGs. For Internal - // TCP/UDP Load Balancing and Network Load Balancing, omit port_name. + // This parameter has no meaning if the backends are NEGs. For internal + // passthrough Network Load Balancers and external passthrough Network + // Load Balancers, omit port_name. 
PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with @@ -7028,18 +7038,19 @@ type BackendServiceConnectionTrackingPolicy struct { // "NEVER_PERSIST" ConnectionPersistenceOnUnhealthyBackends string `json:"connectionPersistenceOnUnhealthyBackends,omitempty"` - // EnableStrongAffinity: Enable Strong Session Affinity for Network Load - // Balancing. This option is not available publicly. + // EnableStrongAffinity: Enable Strong Session Affinity for external + // passthrough Network Load Balancers. This option is not available + // publicly. EnableStrongAffinity bool `json:"enableStrongAffinity,omitempty"` // IdleTimeoutSec: Specifies how long to keep a Connection Tracking - // entry while there is no matching traffic (in seconds). For Internal - // TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the - // maximum is 16 hours. - It can be set only if Connection Tracking is - // less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, - // CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For - // Network Load Balancer the default is 60 seconds. This option is not - // available publicly. + // entry while there is no matching traffic (in seconds). For internal + // passthrough Network Load Balancers: - The minimum (default) is 10 + // minutes and the maximum is 16 hours. - It can be set only if + // Connection Tracking is less than 5-tuple (i.e. Session Affinity is + // CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking + // Mode is PER_SESSION). For external passthrough Network Load Balancers + // the default is 60 seconds. This option is not available publicly. IdleTimeoutSec int64 `json:"idleTimeoutSec,omitempty"` // TrackingMode: Specifies the key used for connection tracking. There @@ -7085,9 +7096,9 @@ func (s *BackendServiceConnectionTrackingPolicy) MarshalJSON() ([]byte, error) { } // BackendServiceFailoverPolicy: For load balancers that have -// configurable failover: Internal TCP/UDP Load Balancing +// configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) -// and external TCP/UDP Load Balancing +// and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // On failover or failback, this field indicates whether connection // draining will be honored. Google Cloud has a fixed connection @@ -7106,9 +7117,9 @@ type BackendServiceFailoverPolicy struct { // unhealthy.If set to false, connections are distributed among all // primary VMs when all primary and all backup backend VMs are // unhealthy. For load balancers that have configurable failover: - // Internal TCP/UDP Load Balancing + // Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // The default is false. DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` @@ -7204,6 +7215,98 @@ func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type BackendServiceHAPolicy struct { + // FastIPMove: Enabling fastIPMove is not supported. 
+ // + // Possible values: + // "DISABLED" + // "GARP_RA" + FastIPMove string `json:"fastIPMove,omitempty"` + + // Leader: Setting a leader is not supported. + Leader *BackendServiceHAPolicyLeader `json:"leader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FastIPMove") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FastIPMove") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceHAPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceHAPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceHAPolicyLeader struct { + // BackendGroup: Setting backendGroup is not supported. + BackendGroup string `json:"backendGroup,omitempty"` + + // NetworkEndpoint: Setting a network endpoint as leader is not + // supported. + NetworkEndpoint *BackendServiceHAPolicyLeaderNetworkEndpoint `json:"networkEndpoint,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendGroup") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendGroup") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceHAPolicyLeader) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceHAPolicyLeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceHAPolicyLeaderNetworkEndpoint struct { + // Instance: Specifying the instance name of a leader is not supported. + Instance string `json:"instance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instance") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Instance") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceHAPolicyLeaderNetworkEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceHAPolicyLeaderNetworkEndpoint + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendServiceIAP: Identity-Aware Proxy type BackendServiceIAP struct { // Enabled: Whether the serving infrastructure will authenticate and @@ -10707,6 +10810,9 @@ type Disk struct { // resource. ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -14774,10 +14880,10 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // * Regional // (https://cloud.google.com/compute/docs/reference/rest/alpha/forwardingRules) // A forwarding rule and its corresponding IP address represent the -// frontend configuration of a Google Cloud Platform load balancer. -// Forwarding rules can also reference target instances and Cloud VPN -// Classic gateways (targetVpnGateway). For more information, read -// Forwarding rule concepts and Using protocol forwarding. +// frontend configuration of a Google Cloud load balancer. Forwarding +// rules can also reference target instances and Cloud VPN Classic +// gateways (targetVpnGateway). For more information, read Forwarding +// rule concepts and Using protocol forwarding. type ForwardingRule struct { // IPAddress: IP address for which this forwarding rule accepts traffic. // When a client sends traffic to this IP address, the forwarding rule @@ -14842,8 +14948,9 @@ type ForwardingRule struct { // AllowGlobalAccess: This field is used along with the backend_service // field for internal load balancing or with the target field for // internal TargetInstance. If set to true, clients can access the - // Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load - // Balancer from all regions. If false, only allows access from the + // internal passthrough Network Load Balancers, the regional internal + // Application Load Balancer, and the regional internal proxy Network + // Load Balancer from all regions. If false, only allows access from the // local region the load balancer is located at. Note that for // INTERNAL_MANAGED forwarding rules, this field cannot be changed after // the forwarding rule is created. @@ -14861,16 +14968,16 @@ type ForwardingRule struct { AllowPscPacketInjection bool `json:"allowPscPacketInjection,omitempty"` // BackendService: Identifies the backend service to which the - // forwarding rule sends traffic. Required for Internal TCP/UDP Load - // Balancing and Network Load Balancing; must be omitted for all other + // forwarding rule sends traffic. Required for internal and external + // passthrough Network Load Balancers; must be omitted for all other // load balancer types. BackendService string `json:"backendService,omitempty"` // BaseForwardingRule: [Output Only] The URL for the corresponding base - // Forwarding Rule. 
By base Forwarding Rule, we mean the Forwarding Rule + // forwarding rule. By base forwarding rule, we mean the forwarding rule // that has the same IP address, protocol, and port settings with the - // current Forwarding Rule, but without sourceIPRanges specified. Always - // empty if the current Forwarding Rule does not have sourceIPRanges + // current forwarding rule, but without sourceIPRanges specified. Always + // empty if the current forwarding rule does not have sourceIPRanges // specified. BaseForwardingRule string `json:"baseForwardingRule,omitempty"` @@ -14923,7 +15030,7 @@ type ForwardingRule struct { IsMirroringCollector bool `json:"isMirroringCollector,omitempty"` // Kind: [Output Only] Type of the resource. Always - // compute#forwardingRule for Forwarding Rule resources. + // compute#forwardingRule for forwarding rule resources. Kind string `json:"kind,omitempty"` // LabelFingerprint: A fingerprint for the labels being applied to this @@ -14986,10 +15093,10 @@ type ForwardingRule struct { Name string `json:"name,omitempty"` // Network: This field is not used for global external load balancing. - // For Internal TCP/UDP Load Balancing, this field identifies the - // network that the load balanced IP should belong to for this - // Forwarding Rule. If the subnetwork is specified, the network of the - // subnetwork will be used. If neither subnetwork nor this field is + // For internal passthrough Network Load Balancers, this field + // identifies the network that the load balanced IP should belong to for + // this forwarding rule. If the subnetwork is specified, the network of + // the subnetwork will be used. If neither subnetwork nor this field is // specified, the default network will be used. For Private Service // Connect forwarding rules that forward traffic to Google APIs, a // network must be provided. @@ -15058,7 +15165,7 @@ type ForwardingRule struct { Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC - // Forwarding Rule. + // forwarding rule. PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` // Possible values: @@ -15092,7 +15199,7 @@ type ForwardingRule struct { ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"` // ServiceLabel: An optional prefix to the service name for this - // Forwarding Rule. If specified, the prefix is the first label of the + // forwarding rule. If specified, the prefix is the first label of the // fully qualified service name. The label must be 1-63 characters long, // and comply with RFC1035. Specifically, the label must be 1-63 // characters long and match the regular expression @@ -15103,25 +15210,26 @@ type ForwardingRule struct { ServiceLabel string `json:"serviceLabel,omitempty"` // ServiceName: [Output Only] The internal fully qualified service name - // for this Forwarding Rule. This field is only used for internal load + // for this forwarding rule. This field is only used for internal load // balancing. ServiceName string `json:"serviceName,omitempty"` - // SourceIpRanges: If not empty, this Forwarding Rule will only forward + // SourceIpRanges: If not empty, this forwarding rule will only forward // the traffic when the source IP address matches one of the IP - // addresses or CIDR ranges set here. Note that a Forwarding Rule can + // addresses or CIDR ranges set here. 
Note that a forwarding rule can // only have up to 64 source IP ranges, and this field can only be used - // with a regional Forwarding Rule whose scheme is EXTERNAL. Each + // with a regional forwarding rule whose scheme is EXTERNAL. Each // source_ip_range entry should be either an IP address (for example, // 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). SourceIpRanges []string `json:"sourceIpRanges,omitempty"` // Subnetwork: This field identifies the subnetwork that the load - // balanced IP should belong to for this Forwarding Rule, used in - // internal load balancing and network load balancing with IPv6. If the - // network specified is in auto subnet mode, this field is optional. - // However, a subnetwork must be specified if the network is in custom - // subnet mode or when creating external forwarding rule with IPv6. + // balanced IP should belong to for this forwarding rule, used with + // internal load balancers and external passthrough Network Load + // Balancers with IPv6. If the network specified is in auto subnet mode, + // this field is optional. However, a subnetwork must be specified if + // the network is in custom subnet mode or when creating external + // forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched @@ -15582,9 +15690,9 @@ func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { } // ForwardingRuleServiceDirectoryRegistration: Describes the -// auto-registration of the Forwarding Rule to Service Directory. The +// auto-registration of the forwarding rule to Service Directory. The // region and project of the Service Directory resource generated from -// this registration will be the same as this Forwarding Rule. +// this registration will be the same as this forwarding rule. type ForwardingRuleServiceDirectoryRegistration struct { // Namespace: Service Directory namespace to register the forwarding // rule under. @@ -15596,8 +15704,8 @@ type ForwardingRuleServiceDirectoryRegistration struct { // ServiceDirectoryRegion: [Optional] Service Directory region to // register this global forwarding rule under. Default to "us-central1". - // Only used for PSC for Google APIs. All PSC for Google APIs Forwarding - // Rules on the same network should use the same Service Directory + // Only used for PSC for Google APIs. All PSC for Google APIs forwarding + // rules on the same network should use the same Service Directory // region. ServiceDirectoryRegion string `json:"serviceDirectoryRegion,omitempty"` @@ -15797,16 +15905,17 @@ func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { type FutureReservation struct { // AutoCreatedReservationsDeleteTime: Future timestamp when the FR - // auto-created reservations will be deleted by GCE. Format of this - // field must be a valid + // auto-created reservations will be deleted by Compute Engine. Format + // of this field must be a valid // href="https://www.ietf.org/rfc/rfc3339.txt">RFC3339 value. AutoCreatedReservationsDeleteTime string `json:"autoCreatedReservationsDeleteTime,omitempty"` // AutoCreatedReservationsDuration: Specifies the duration of // auto-created reservations. It represents relative time to future // reservation start_time when auto-created reservations will be - // automatically deleted by GCE. Duration time unit is represented as a - // count of seconds and fractions of seconds at nanosecond resolution. + // automatically deleted by Compute Engine. 
Duration time unit is + // represented as a count of seconds and fractions of seconds at + // nanosecond resolution. AutoCreatedReservationsDuration *Duration `json:"autoCreatedReservationsDuration,omitempty"` // AutoDeleteAutoCreatedReservations: Setting for enabling or disabling @@ -16805,7 +16914,7 @@ type GRPCHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -16815,7 +16924,7 @@ type GRPCHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -17315,7 +17424,7 @@ type HTTP2HealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -17325,7 +17434,7 @@ type HTTP2HealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -17425,7 +17534,7 @@ type HTTPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Also supported in // legacy HTTP health checks for target pools. The health check supports // all backends supported by the backend service provided the backend @@ -17535,7 +17644,7 @@ type HTTPSHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. 
Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -17545,7 +17654,7 @@ type HTTPSHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -17631,20 +17740,14 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // (/compute/docs/reference/rest/alpha/regionHealthChecks) * Global // (/compute/docs/reference/rest/alpha/healthChecks) These health check // resources can be used for load balancing and for autohealing VMs in a -// managed instance group (MIG). **Load balancing** The following load -// balancer can use either regional or global health check: * Internal -// TCP/UDP load balancer The following load balancers require regional -// health check: * Internal HTTP(S) load balancer * Backend -// service-based network load balancer Traffic Director and the -// following load balancers require global health check: * External -// HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load -// balancer The following load balancer require legacy HTTP health -// checks (/compute/docs/reference/rest/v1/httpHealthChecks): * Target -// pool-based network load balancer **Autohealing in MIGs** The health -// checks that you use for autohealing VMs in a MIG can be either -// regional or global. For more information, see Set up an application -// health check and autohealing. For more information, see Health checks -// overview. +// managed instance group (MIG). **Load balancing** Health check +// requirements vary depending on the type of load balancer. For details +// about the type of health check supported for each load balancer and +// corresponding backend type, see Health checks overview: Load balancer +// guide. **Autohealing in MIGs** The health checks that you use for +// autohealing VMs in a MIG can be either regional or global. For more +// information, see Set up an application health check and autohealing. +// For more information, see Health checks overview. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -20964,6 +21067,9 @@ type Image struct { // imageFamilyViews.get method. RolloutOverride *RolloutPolicy `json:"rolloutOverride,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -21617,6 +21723,9 @@ type Instance struct { // corresponding input only field. 
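A concrete way to read the portSpecification values above is through the generated client: the same three options appear on every protocol-specific health check type. A minimal sketch, assuming the vendored v0.alpha package (google.golang.org/api/compute/v0.alpha), Application Default Credentials, and placeholder project and resource names:

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.alpha"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // picks up Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	hc := &compute.HealthCheck{
		Name:             "tcp-serving-port-check", // placeholder
		Type:             "TCP",
		CheckIntervalSec: 5,
		TcpHealthCheck: &compute.TCPHealthCheck{
			// USE_SERVING_PORT: the port comes from the backend service or
			// endpoint rather than a fixed number set here.
			PortSpecification: "USE_SERVING_PORT",
		},
	}

	op, err := svc.HealthChecks.Insert("my-project", hc).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("health check insert operation: %s", op.Name)
}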
ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: [Output Only] Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -22604,6 +22713,10 @@ type InstanceGroupManager struct { // complementary to this Instance Group Manager. NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + // Params: Input only. Additional params passed with the request, but + // not persisted as part of resource payload. + Params *InstanceGroupManagerParams `json:"params,omitempty"` + // Region: [Output Only] The URL of the region where the managed // instance group resides (for regional resources). Region string `json:"region,omitempty"` @@ -23505,6 +23618,40 @@ func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagerParams: Input only additional params for instance +// group manager creation. +type InstanceGroupManagerParams struct { + // ResourceManagerTags: Resource manager tags to be bound to the + // instance group manager. Tag keys and values have the same definition + // as resource manager tags. Keys must be in the format `tagKeys/123`, + // and values are in the format `tagValues/456`. The field is allowed + // for INSERT only. + ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourceManagerTags") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourceManagerTags") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerParams) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupManagerResizeRequest: InstanceGroupManagerResizeRequest // represents a request to create a number of VMs: either immediately or // by queuing the request for the specified time. This resize request is @@ -23939,14 +24086,15 @@ func (s *InstanceGroupManagerResizeRequestsListResponseWarningData) MarshalJSON( type InstanceGroupManagerStandbyPolicy struct { InitialDelaySec int64 `json:"initialDelaySec,omitempty"` - // Mode: Defines behaviour of using instances from standby pool to - // resize MIG. + // Mode: Defines how a MIG resumes or starts VMs from a standby pool + // when the group scales out. The default mode is `MANUAL`. // // Possible values: - // "MANUAL" - MIG does not automatically stop/start or suspend/resume - // VMs. - // "SCALE_OUT_POOL" - MIG automatically resumes and starts VMs when it - // scales out, and replenishes the standby pool afterwards. 
+ // "MANUAL" - MIG does not automatically resume or start VMs in the + // standby pool when the group scales out. + // "SCALE_OUT_POOL" - MIG automatically resumes or starts VMs in the + // standby pool when the group scales out, and replenishes the standby + // pool afterwards. Mode string `json:"mode,omitempty"` // ForceSendFields is a list of field names (e.g. "InitialDelaySec") to @@ -27494,6 +27642,34 @@ func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesAddNetworkInterfaceRequest struct { + // NetworkInterface: The new network interface to add. + NetworkInterface *NetworkInterface `json:"network_interface,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkInterface") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkInterface") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesAddNetworkInterfaceRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesAddNetworkInterfaceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesAddResourcePoliciesRequest struct { // ResourcePolicies: Resource policies to be added to this instance. ResourcePolicies []string `json:"resourcePolicies,omitempty"` @@ -28232,6 +28408,9 @@ type InstantSnapshot struct { // snapshot resource. ResourceStatus *InstantSnapshotResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -33465,6 +33644,9 @@ type MachineImage struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -54993,6 +55175,10 @@ type RouterStatusBgpPeerStatus struct { BfdStatus *BfdStatus `json:"bfdStatus,omitempty"` + // EnableIpv4: Enable IPv4 traffic over BGP Peer. It is enabled by + // default if the peerIpAddress is version 4. + EnableIpv4 bool `json:"enableIpv4,omitempty"` + // EnableIpv6: Enable IPv6 traffic over BGP Peer. If not specified, it // is disabled by default. EnableIpv6 bool `json:"enableIpv6,omitempty"` @@ -55000,6 +55186,9 @@ type RouterStatusBgpPeerStatus struct { // IpAddress: IP address of the local BGP interface. IpAddress string `json:"ipAddress,omitempty"` + // Ipv4NexthopAddress: IPv4 address of the local BGP interface. 
+ Ipv4NexthopAddress string `json:"ipv4NexthopAddress,omitempty"` + // Ipv6NexthopAddress: IPv6 address of the local BGP interface. Ipv6NexthopAddress string `json:"ipv6NexthopAddress,omitempty"` @@ -55019,6 +55208,9 @@ type RouterStatusBgpPeerStatus struct { // PeerIpAddress: IP address of the remote BGP interface. PeerIpAddress string `json:"peerIpAddress,omitempty"` + // PeerIpv4NexthopAddress: IPv4 address of the remote BGP interface. + PeerIpv4NexthopAddress string `json:"peerIpv4NexthopAddress,omitempty"` + // PeerIpv6NexthopAddress: IPv6 address of the remote BGP interface. PeerIpv6NexthopAddress string `json:"peerIpv6NexthopAddress,omitempty"` @@ -55043,9 +55235,16 @@ type RouterStatusBgpPeerStatus struct { // StatusReason: Indicates why particular status was returned. // // Possible values: + // "IPV4_PEER_ON_IPV6_ONLY_CONNECTION" - BGP peer disabled because it + // requires IPv4 but the underlying connection is IPv6-only. + // "IPV6_PEER_ON_IPV4_ONLY_CONNECTION" - BGP peer disabled because it + // requires IPv6 but the underlying connection is IPv4-only. // "MD5_AUTH_INTERNAL_PROBLEM" - Indicates internal problems with // configuration of MD5 authentication. This particular reason can only // be returned when md5AuthEnabled is true and status is DOWN. + // "MISSING_NETWORK_CONNECTIVITY_CENTER_SPOKE" - BGP peer disabled + // because it is not labeled as an NCC spoke. Currently, BGP peers using + // SD-WAN connectivity will be disabled for this reason. // "STATUS_REASON_UNSPECIFIED" StatusReason string `json:"statusReason,omitempty"` @@ -55915,7 +56114,7 @@ type SSLHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -55925,7 +56124,7 @@ type SSLHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -57245,10 +57444,20 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfi AutoDeployLoadThreshold float64 `json:"autoDeployLoadThreshold,omitempty"` + DetectionAbsoluteQps float64 `json:"detectionAbsoluteQps,omitempty"` + + DetectionLoadThreshold float64 `json:"detectionLoadThreshold,omitempty"` + + DetectionRelativeToBaselineQps float64 `json:"detectionRelativeToBaselineQps,omitempty"` + // Name: The name must be 1-63 characters long, and comply with RFC1035. // The name must be unique within the security policy. Name string `json:"name,omitempty"` + // TrafficGranularityConfigs: Configuration options for enabling + // Adaptive Protection to operate on specified granular traffic units. 
+ TrafficGranularityConfigs []*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig `json:"trafficGranularityConfigs,omitempty"` + // ForceSendFields is a list of field names (e.g. // "AutoDeployConfidenceThreshold") to unconditionally include in API // requests. By default, fields with empty or default values are omitted @@ -57280,6 +57489,9 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC AutoDeployConfidenceThreshold gensupport.JSONFloat64 `json:"autoDeployConfidenceThreshold"` AutoDeployImpactedBaselineThreshold gensupport.JSONFloat64 `json:"autoDeployImpactedBaselineThreshold"` AutoDeployLoadThreshold gensupport.JSONFloat64 `json:"autoDeployLoadThreshold"` + DetectionAbsoluteQps gensupport.JSONFloat64 `json:"detectionAbsoluteQps"` + DetectionLoadThreshold gensupport.JSONFloat64 `json:"detectionLoadThreshold"` + DetectionRelativeToBaselineQps gensupport.JSONFloat64 `json:"detectionRelativeToBaselineQps"` *NoMethod } s1.NoMethod = (*NoMethod)(s) @@ -57289,9 +57501,58 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC s.AutoDeployConfidenceThreshold = float64(s1.AutoDeployConfidenceThreshold) s.AutoDeployImpactedBaselineThreshold = float64(s1.AutoDeployImpactedBaselineThreshold) s.AutoDeployLoadThreshold = float64(s1.AutoDeployLoadThreshold) + s.DetectionAbsoluteQps = float64(s1.DetectionAbsoluteQps) + s.DetectionLoadThreshold = float64(s1.DetectionLoadThreshold) + s.DetectionRelativeToBaselineQps = float64(s1.DetectionRelativeToBaselineQps) return nil } +// SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThreshold +// ConfigTrafficGranularityConfig: Configurations to specifc granular +// traffic units processed by Adaptive Protection. +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig struct { + // EnableEachUniqueValue: If enabled, traffic matching each unique value + // for the specified type constitutes a separate traffic unit. It can + // only be set to true if `value` is empty. + EnableEachUniqueValue bool `json:"enableEachUniqueValue,omitempty"` + + // Type: Type of this configuration. + // + // Possible values: + // "HTTP_HEADER_HOST" + // "HTTP_PATH" + // "UNSPECIFIED_TYPE" + Type string `json:"type,omitempty"` + + // Value: Requests that match this value constitute a granular traffic + // unit. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EnableEachUniqueValue") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnableEachUniqueValue") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyAdvancedOptionsConfig struct { // JsonCustomConfig: Custom configuration to apply the JSON parsing. // Only applicable when json_parsing is set to STANDARD. @@ -58446,7 +58707,13 @@ type SecurityPolicyRuleRateLimitOptions struct { // Server name indication in the TLS session of the HTTPS request. The // key value is truncated to the first 128 bytes. The key type defaults // to ALL on a HTTP session. - REGION_CODE: The country/region from - // which the request originates. + // which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL + // fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If + // not available, the key type defaults to ALL. - USER_IP: The IP + // address of the originating client, which is resolved based on + // "userIpRequestHeaders" configured with the security policy. If there + // is no "userIpRequestHeaders" configuration or an IP address cannot be + // resolved from it, the key type defaults to IP. // // Possible values: // "ALL" @@ -58457,6 +58724,8 @@ type SecurityPolicyRuleRateLimitOptions struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` @@ -58547,7 +58816,14 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // bytes. - SNI: Server name indication in the TLS session of the HTTPS // request. The key value is truncated to the first 128 bytes. The key // type defaults to ALL on a HTTP session. - REGION_CODE: The - // country/region from which the request originates. + // country/region from which the request originates. - + // TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects + // using HTTPS, HTTP/2 or HTTP/3. If not available, the key type + // defaults to ALL. - USER_IP: The IP address of the originating client, + // which is resolved based on "userIpRequestHeaders" configured with the + // security policy. If there is no "userIpRequestHeaders" configuration + // or an IP address cannot be resolved from it, the key type defaults to + // IP. // // Possible values: // "ALL" @@ -58558,6 +58834,8 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` @@ -60526,6 +60804,9 @@ type Snapshot struct { // Only applicable for regional snapshots. Region string `json:"region,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -62875,9 +63156,9 @@ func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { } // SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. 
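The granularity settings are plain struct fields, so using them is mostly a matter of populating the generated types; the USER_IP and TLS_JA3_FINGERPRINT rate-limit keys are likewise just string values assigned to EnforceOnKey on SecurityPolicyRuleRateLimitOptions. A sketch of a threshold config that keys Adaptive Protection on the Host header, with the enclosing SecurityPolicy wiring elided and the name and QPS figure as placeholders:

// perHostThreshold returns an Adaptive Protection threshold config that treats
// each unique Host header value as its own traffic unit.
func perHostThreshold() *compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig {
	return &compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig{
		Name:                 "per-host", // placeholder
		DetectionAbsoluteQps: 500,        // hypothetical detection threshold
		TrafficGranularityConfigs: []*compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig{
			{
				Type:                  "HTTP_HEADER_HOST",
				EnableEachUniqueValue: true, // Value must be left empty when this is set
			},
		},
	}
}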
+// control SSL features, such as versions and cipher suites, that are +// offered by Application Load Balancers and proxy Network Load +// Balancers. For more information, read SSL policies overview. type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -66192,7 +66473,7 @@ type TCPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -66202,7 +66483,7 @@ type TCPHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -68787,10 +69068,10 @@ func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { } // TargetPool: Represents a Target Pool resource. Target pools are used -// for network TCP/UDP load balancing. A target pool references member -// instances, an associated legacy HttpHealthCheck resource, and, -// optionally, a backup target pool. For more information, read Using -// target pools. +// with external passthrough Network Load Balancers. A target pool +// references member instances, an associated legacy HttpHealthCheck +// resource, and, optionally, a backup target pool. For more +// information, read Using target pools. type TargetPool struct { // BackupPool: The server-defined URL for the resource. This field is // applicable only when the containing target pool is serving a @@ -69810,10 +70091,10 @@ func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error } // TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL -// proxy is a component of a SSL Proxy load balancer. Global forwarding -// rules reference a target SSL proxy, and the target proxy then -// references an external backend service. For more information, read -// Using Target Proxies. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target SSL proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetSslProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field @@ -70327,10 +70608,10 @@ func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { } // TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP -// proxy is a component of a TCP Proxy load balancer. Global forwarding -// rules reference target TCP proxy, and the target proxy then -// references an external backend service. 
For more information, read -// TCP Proxy Load Balancing overview. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target TCP proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -115423,8 +115704,8 @@ type InstanceGroupManagersListManagedInstancesCall struct { // instance, the currentAction is CREATING. If a previous action failed, // the list displays the errors for that failed action. The orderBy // query parameter is not supported. The `pageToken` query parameter is -// supported only in the alpha and beta API and only if the group's -// `listManagedInstancesResults` field is set to `PAGINATED`. +// supported only if the group's `listManagedInstancesResults` field is +// set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -115610,7 +115891,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", @@ -123276,6 +123557,194 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } +// method id "compute.instances.addNetworkInterface": + +type InstancesAddNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + instancesaddnetworkinterfacerequest *InstancesAddNetworkInterfaceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddNetworkInterface: Adds a network interface to an instance. +// +// - instance: The instance name for this request stored as resource_id. +// Name should conform to RFC1035 or be an unsigned long integer. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
+func (r *InstancesService) AddNetworkInterface(project string, zone string, instance string, instancesaddnetworkinterfacerequest *InstancesAddNetworkInterfaceRequest) *InstancesAddNetworkInterfaceCall { + c := &InstancesAddNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancesaddnetworkinterfacerequest = instancesaddnetworkinterfacerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesAddNetworkInterfaceCall) RequestId(requestId string) *InstancesAddNetworkInterfaceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAddNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesAddNetworkInterfaceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAddNetworkInterfaceCall) Context(ctx context.Context) *InstancesAddNetworkInterfaceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesAddNetworkInterfaceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesAddNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesaddnetworkinterfacerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.addNetworkInterface" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAddNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a network interface to an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", + // "httpMethod": "POST", + // "id": "compute.instances.addNetworkInterface", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", + // "request": { + // "$ref": "InstancesAddNetworkInterfaceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.addResourcePolicies": type InstancesAddResourcePoliciesCall struct { @@ -131939,9 +132408,11 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -132069,7 +132540,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -132149,9 +132620,11 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -132272,7 +132745,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. 
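Through the generated client, the new addNetworkInterface method follows the usual call-builder pattern: build the request, optionally set requestId, then Do. A sketch reusing the client setup from the first example; project, zone, instance, and subnetwork are placeholders:

// addNic attaches an additional network interface to an instance using the
// compute.instances.addNetworkInterface method.
func addNic(ctx context.Context, svc *compute.Service) error {
	req := &compute.InstancesAddNetworkInterfaceRequest{
		NetworkInterface: &compute.NetworkInterface{
			// The new interface is described like any other NIC; this
			// subnetwork URL is a placeholder.
			Subnetwork: "regions/us-central1/subnetworks/example-subnet",
		},
	}
	op, err := svc.Instances.AddNetworkInterface("my-project", "us-central1-a", "example-instance", req).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("addNetworkInterface operation: %s", op.Name)
	return nil
}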
If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -185690,9 +186163,8 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its // instances. The orderBy query parameter is not supported. The -// `pageToken` query parameter is supported only in the alpha and beta -// API and only if the group's `listManagedInstancesResults` field is -// set to `PAGINATED`. +// `pageToken` query parameter is supported only if the group's +// `listManagedInstancesResults` field is set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -185877,7 +186349,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -246294,8 +246766,8 @@ type TargetSslProxiesSetSslPolicyCall struct { // SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy // specifies the server-side support for SSL features. This affects -// connections between clients and the SSL proxy load balancer. They do -// not affect the connection between the load balancer and the backends. +// connections between clients and the load balancer. They do not affect +// the connection between the load balancer and the backends. // // - project: Project ID for this request. // - targetSslProxy: Name of the TargetSslProxy resource whose SSL @@ -246417,7 +246889,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. 
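Because discardLocalSsd is now expected whenever Local SSD is attached, it is worth setting explicitly on both Stop and Suspend calls. A sketch for Suspend, reusing the same client setup as above:

// suspendKeepingLocalSsd suspends an instance that has Local SSD attached and
// keeps the SSD contents; pass true instead to discard them.
func suspendKeepingLocalSsd(ctx context.Context, svc *compute.Service) error {
	op, err := svc.Instances.Suspend("my-project", "us-central1-a", "example-instance").
		DiscardLocalSsd(false).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	log.Printf("suspend operation: %s", op.Name)
	return nil
}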
They do not affect the connection between the load balancer and the backends.", // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslPolicy", diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json index fae8744d0c..25c230b573 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json @@ -9067,7 +9067,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", @@ -12986,7 +12986,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -13038,7 +13038,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -24945,7 +24945,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. 
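With pageToken no longer limited to the alpha and beta surfaces, callers can page through managed instances whenever the group's listManagedInstancesResults is set to PAGINATED. A sketch of a manual page loop, reusing the earlier client setup and assuming the generated call exposes PageToken and the response exposes NextPageToken, as the PAGINATED mode implies:

// listAllManagedInstances pages through listManagedInstances results. Paging
// only happens when listManagedInstancesResults is PAGINATED; otherwise the
// first response carries everything and NextPageToken stays empty.
func listAllManagedInstances(ctx context.Context, svc *compute.Service) error {
	token := ""
	for {
		call := svc.InstanceGroupManagers.
			ListManagedInstances("my-project", "us-central1-a", "example-mig").
			Context(ctx)
		if token != "" {
			call = call.PageToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return err
		}
		for _, mi := range resp.ManagedInstances {
			log.Printf("%s: %s", mi.Instance, mi.CurrentAction)
		}
		if resp.NextPageToken == "" {
			return nil
		}
		token = resp.NextPageToken
	}
}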
The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -37938,7 +37938,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. They do not affect the connection between the load balancer and the backends.", "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslPolicy", @@ -40215,7 +40215,7 @@ } } }, - "revision": "20231110", + "revision": "20231128", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -43278,7 +43278,7 @@ "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Lifetime of cookies in seconds. This setting is applicable to Application Load Balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -43313,7 +43313,7 @@ }, "connectionTrackingPolicy": { "$ref": "BackendServiceConnectionTrackingPolicy", - "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing." + "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for external passthrough Network Load Balancers and internal passthrough Network Load Balancers." }, "consistentHash": { "$ref": "ConsistentHashLoadBalancerSettings", @@ -43346,12 +43346,12 @@ "type": "string" }, "enableCDN": { - "description": "If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.", + "description": "If true, enables Cloud CDN for the backend service of a global external Application Load Balancer.", "type": "boolean" }, "failoverPolicy": { "$ref": "BackendServiceFailoverPolicy", - "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. 
For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." + "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." }, "fingerprint": { "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a BackendService.", @@ -43367,7 +43367,7 @@ }, "iap": { "$ref": "BackendServiceIAP", - "description": "The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing." + "description": "The configurations for Identity-Aware Proxy on this resource. Not available for internal passthrough Network Load Balancers and external passthrough Network Load Balancers." }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -43375,7 +43375,7 @@ "type": "string" }, "ipAddressSelectionPolicy": { - "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", + "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. 
- PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced global external Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, - Internal proxy Network Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional internal Application Load Balancer (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", "enum": [ "IPV4_ONLY", "IPV6_ONLY", @@ -43406,10 +43406,10 @@ "INVALID_LOAD_BALANCING_SCHEME" ], "enumDescriptions": [ - "Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing", - "Signifies that this will be used for External Managed HTTP(S) Load Balancing.", - "Signifies that this will be used for Internal TCP/UDP Load Balancing.", - "Signifies that this will be used for Internal HTTP(S) Load Balancing.", + "Signifies that this will be used for classic Application Load Balancers, global external proxy Network Load Balancers, or external passthrough Network Load Balancers.", + "Signifies that this will be used for global external Application Load Balancers, regional external Application Load Balancers, or regional external proxy Network Load Balancers.", + "Signifies that this will be used for internal passthrough Network Load Balancers.", + "Signifies that this will be used for internal Application Load Balancers.", "Signifies that this will be used by Traffic Director.", "" ], @@ -43476,12 +43476,12 @@ }, "port": { "deprecated": true, - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port.", "format": "int32", "type": "integer" }, "portName": { - "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.", + "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port_name.", "type": "string" }, "protocol": { @@ -43864,11 +43864,11 @@ "type": "string" }, "enableStrongAffinity": { - "description": "Enable Strong Session Affinity for Network Load Balancing. 
This option is not available publicly.", + "description": "Enable Strong Session Affinity for external passthrough Network Load Balancers. This option is not available publicly.", "type": "boolean" }, "idleTimeoutSec": { - "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.", + "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For internal passthrough Network Load Balancers: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For external passthrough Network Load Balancers the default is 60 seconds. This option is not available publicly.", "format": "int32", "type": "integer" }, @@ -43890,7 +43890,7 @@ "type": "object" }, "BackendServiceFailoverPolicy": { - "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", + "description": "For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", "id": "BackendServiceFailoverPolicy", "properties": { "disableConnectionDrainOnFailover": { @@ -43898,7 +43898,7 @@ "type": "boolean" }, "dropTrafficIfUnhealthy": { - "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. 
For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", + "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", "type": "boolean" }, "failoverRatio": { @@ -48815,7 +48815,7 @@ "type": "object" }, "ForwardingRule": { - "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/beta/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/beta/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", + "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/beta/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/beta/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", "id": "ForwardingRule", "properties": { "IPAddress": { @@ -48849,7 +48849,7 @@ "type": "boolean" }, "allowGlobalAccess": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the internal passthrough Network Load Balancers, the regional internal Application Load Balancer, and the regional internal proxy Network Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. 
Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", "type": "boolean" }, "allowPscGlobalAccess": { @@ -48861,11 +48861,11 @@ "type": "boolean" }, "backendService": { - "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for internal and external passthrough Network Load Balancers; must be omitted for all other load balancer types.", "type": "string" }, "baseForwardingRule": { - "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + "description": "[Output Only] The URL for the corresponding base forwarding rule. By base forwarding rule, we mean the forwarding rule that has the same IP address, protocol, and port settings with the current forwarding rule, but without sourceIPRanges specified. Always empty if the current forwarding rule does not have sourceIPRanges specified.", "type": "string" }, "creationTimestamp": { @@ -48906,7 +48906,7 @@ }, "kind": { "default": "compute#forwardingRule", - "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for forwarding rule resources.", "type": "string" }, "labelFingerprint": { @@ -48954,7 +48954,7 @@ "type": "string" }, "network": { - "description": "This field is not used for global external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for global external load balancing. For internal passthrough Network Load Balancers, this field identifies the network that the load balanced IP should belong to for this forwarding rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -48989,7 +48989,7 @@ "type": "array" }, "pscConnectionId": { - "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", + "description": "[Output Only] The PSC connection id of the PSC forwarding rule.", "format": "uint64", "type": "string" }, @@ -49028,23 +49028,23 @@ "type": "array" }, "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. 
Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", + "description": "An optional prefix to the service name for this forwarding rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "serviceName": { - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", + "description": "[Output Only] The internal fully qualified service name for this forwarding rule. This field is only used for internal load balancing.", "type": "string" }, "sourceIpRanges": { - "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + "description": "If not empty, this forwarding rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a forwarding rule can only have up to 64 source IP ranges, and this field can only be used with a regional forwarding rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", "items": { "type": "string" }, "type": "array" }, "subnetwork": { - "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", + "description": "This field identifies the subnetwork that the load balanced IP should belong to for this forwarding rule, used with internal load balancers and external passthrough Network Load Balancers with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", "type": "string" }, "target": { @@ -49375,7 +49375,7 @@ "type": "object" }, "ForwardingRuleServiceDirectoryRegistration": { - "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. 
The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "description": "Describes the auto-registration of the forwarding rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this forwarding rule.", "id": "ForwardingRuleServiceDirectoryRegistration", "properties": { "namespace": { @@ -49387,7 +49387,7 @@ "type": "string" }, "serviceDirectoryRegion": { - "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", + "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs forwarding rules on the same network should use the same Service Directory region.", "type": "string" } }, @@ -49531,12 +49531,12 @@ "id": "FutureReservation", "properties": { "autoCreatedReservationsDeleteTime": { - "description": "Future timestamp when the FR auto-created reservations will be deleted by GCE. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.", + "description": "Future timestamp when the FR auto-created reservations will be deleted by Compute Engine. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.", "type": "string" }, "autoCreatedReservationsDuration": { "$ref": "Duration", - "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by GCE. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution." + "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by Compute Engine. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution." }, "autoDeleteAutoCreatedReservations": { "description": "Setting for enabling or disabling automatic deletion for auto-created reservation. If set to true, auto-created reservations will be deleted at Future Reservation's end time (default) or at user's defined timestamp if any of the [auto_created_reservations_delete_time, auto_created_reservations_duration] values is specified. For keeping auto-created reservation indefinitely, this value should be set to false.", @@ -50295,7 +50295,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. 
USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -50531,7 +50531,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -50584,7 +50584,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. 
For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -50637,7 +50637,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -50674,7 +50674,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/beta/regionHealthChecks) * [Global](/compute/docs/reference/rest/beta/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** The following load balancer can use either regional or global health check: * Internal TCP/UDP load balancer The following load balancers require regional health check: * Internal HTTP(S) load balancer * Backend service-based network load balancer Traffic Director and the following load balancers require global health check: * External HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load balancer The following load balancer require [legacy HTTP health checks](/compute/docs/reference/rest/v1/httpHealthChecks): * Target pool-based network load balancer **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", + "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/beta/regionHealthChecks) * [Global](/compute/docs/reference/rest/beta/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** Health check requirements vary depending on the type of load balancer. For details about the type of health check supported for each load balancer and corresponding backend type, see Health checks overview: Load balancer guide. **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -53114,6 +53114,10 @@ "$ref": "ResourceStatus", "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." }, + "satisfiesPzi": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -54227,6 +54231,13 @@ }, "description": "Named instance selections configuring properties that the group will use when creating new VMs.", "type": "object" + }, + "instanceSelections": { + "additionalProperties": { + "$ref": "InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection" + }, + "description": "Named instance selections configuring properties that the group will use when creating new VMs.", + "type": "object" } }, "type": "object" @@ -54732,14 +54743,14 @@ "type": "integer" }, "mode": { - "description": "Defines behaviour of using instances from standby pool to resize MIG.", + "description": "Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. 
The default mode is `MANUAL`.", "enum": [ "MANUAL", "SCALE_OUT_POOL" ], "enumDescriptions": [ - "MIG does not automatically stop/start or suspend/resume VMs.", - "MIG automatically resumes and starts VMs when it scales out, and replenishes the standby pool afterwards." + "MIG does not automatically resume or start VMs in the standby pool when the group scales out.", + "MIG automatically resumes or starts VMs in the standby pool when the group scales out, and replenishes the standby pool afterwards." ], "type": "string" } @@ -69607,6 +69618,9 @@ "PREEMPTIBLE_NVIDIA_T4_GPUS", "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS", "PREEMPTIBLE_NVIDIA_V100_GPUS", + "PREEMPTIBLE_TPU_LITE_DEVICE_V5", + "PREEMPTIBLE_TPU_LITE_PODSLICE_V5", + "PREEMPTIBLE_TPU_PODSLICE_V4", "PRIVATE_V6_ACCESS_SUBNETWORKS", "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK", "PSC_INTERNAL_LB_FORWARDING_RULES", @@ -69645,6 +69659,9 @@ "TARGET_SSL_PROXIES", "TARGET_TCP_PROXIES", "TARGET_VPN_GATEWAYS", + "TPU_LITE_DEVICE_V5", + "TPU_LITE_PODSLICE_V5", + "TPU_PODSLICE_V4", "URL_MAPS", "VPN_GATEWAYS", "VPN_TUNNELS", @@ -69781,6 +69798,9 @@ "", "", "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -69800,6 +69820,9 @@ "", "", "", + "", + "", + "", "" ], "type": "string" @@ -75031,7 +75054,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. 
Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -75932,10 +75955,58 @@ "format": "float", "type": "number" }, + "detectionAbsoluteQps": { + "format": "float", + "type": "number" + }, + "detectionLoadThreshold": { + "format": "float", + "type": "number" + }, + "detectionRelativeToBaselineQps": { + "format": "float", + "type": "number" + }, "name": { "description": "The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" + }, + "trafficGranularityConfigs": { + "description": "Configuration options for enabling Adaptive Protection to operate on specified granular traffic units.", + "items": { + "$ref": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig": { + "description": "Configurations to specifc granular traffic units processed by Adaptive Protection.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig", + "properties": { + "enableEachUniqueValue": { + "description": "If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if `value` is empty.", + "type": "boolean" + }, + "type": { + "description": "Type of this configuration.", + "enum": [ + "HTTP_HEADER_HOST", + "HTTP_PATH", + "UNSPECIFIED_TYPE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "value": { + "description": "Requests that match this value constitute a granular traffic unit.", + "type": "string" } }, "type": "object" @@ -76610,7 +76681,7 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. 
The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "ALL_IPS", @@ -76620,6 +76691,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDeprecated": [ @@ -76631,6 +76704,8 @@ false, false, false, + false, + false, false ], "enumDescriptions": [ @@ -76642,6 +76717,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -76680,7 +76757,7 @@ "type": "string" }, "enforceOnKeyType": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". 
The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "ALL_IPS", @@ -76690,6 +76767,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDeprecated": [ @@ -76701,6 +76780,8 @@ false, false, false, + false, + false, false ], "enumDescriptions": [ @@ -76712,6 +76793,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -79409,7 +79492,7 @@ "type": "object" }, "SslPolicy": { - "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", + "description": "Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview.", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -80487,7 +80570,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. 
Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -82362,7 +82445,7 @@ "type": "object" }, "TargetPool": { - "description": "Represents a Target Pool resource. Target pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", + "description": "Represents a Target Pool resource. Target pools are used with external passthrough Network Load Balancers. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", "id": "TargetPool", "properties": { "backupPool": { @@ -83025,7 +83108,7 @@ "type": "object" }, "TargetSslProxy": { - "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. 
Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", + "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetSslProxy", "properties": { "certificateMap": { @@ -83404,7 +83487,7 @@ "type": "object" }, "TargetTcpProxy": { - "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.", + "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target TCP proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetTcpProxy", "properties": { "creationTimestamp": { diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go index 58754f5708..e5749a7807 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go @@ -5863,13 +5863,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // For more information, see Backend Services. type BackendService struct { // AffinityCookieTtlSec: Lifetime of cookies in seconds. This setting is - // applicable to external and internal HTTP(S) load balancers and - // Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session - // affinity. If set to 0, the cookie is non-persistent and lasts only - // until the end of the browser session (or equivalent). The maximum - // allowed value is two weeks (1,209,600). Not supported when the - // backend service is referenced by a URL map that is bound to target - // gRPC proxy that has validateForProxyless field set to true. + // applicable to Application Load Balancers and Traffic Director and + // requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to + // 0, the cookie is non-persistent and lasts only until the end of the + // browser session (or equivalent). The maximum allowed value is two + // weeks (1,209,600). Not supported when the backend service is + // referenced by a URL map that is bound to target gRPC proxy that has + // validateForProxyless field set to true. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -5895,8 +5895,8 @@ type BackendService struct { // ConnectionTrackingPolicy: Connection Tracking configuration for this // BackendService. Connection tracking policy settings are only - // available for Network Load Balancing and Internal TCP/UDP Load - // Balancing. + // available for external passthrough Network Load Balancers and + // internal passthrough Network Load Balancers. 
ConnectionTrackingPolicy *BackendServiceConnectionTrackingPolicy `json:"connectionTrackingPolicy,omitempty"` // ConsistentHash: Consistent Hash-based load balancing can be used to @@ -5935,15 +5935,15 @@ type BackendService struct { // security policy associated with this backend service. EdgeSecurityPolicy string `json:"edgeSecurityPolicy,omitempty"` - // EnableCDN: If true, enables Cloud CDN for the backend service of an - // external HTTP(S) load balancer. + // EnableCDN: If true, enables Cloud CDN for the backend service of a + // global external Application Load Balancer. EnableCDN bool `json:"enableCDN,omitempty"` // FailoverPolicy: Requires at least one backend instance group to be // defined as a backup (failover) backend. For load balancers that have - // configurable failover: Internal TCP/UDP Load Balancing + // configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` @@ -5967,8 +5967,8 @@ type BackendService struct { HealthChecks []string `json:"healthChecks,omitempty"` // Iap: The configurations for Identity-Aware Proxy on this resource. - // Not available for Internal TCP/UDP Load Balancing and Network Load - // Balancing. + // Not available for internal passthrough Network Load Balancers and + // external passthrough Network Load Balancers. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -5988,13 +5988,13 @@ type BackendService struct { // backend service (Instance Group, Managed Instance Group, Network // Endpoint Group), regardless of traffic from the client to the proxy. // Only IPv6 health checks are used to check the health of the backends. - // This field is applicable to either: - Advanced Global External HTTPS - // Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional - // External HTTPS Load Balancing, - Internal TCP Proxy (load balancing - // scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing - // (load balancing scheme INTERNAL_MANAGED), - Traffic Director with - // Envoy proxies and proxyless gRPC (load balancing scheme - // INTERNAL_SELF_MANAGED). + // This field is applicable to either: - Advanced global external + // Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - + // Regional external Application Load Balancer, - Internal proxy Network + // Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional + // internal Application Load Balancer (load balancing scheme + // INTERNAL_MANAGED), - Traffic Director with Envoy proxies and + // proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). // // Possible values: // "IPV4_ONLY" - Only send IPv4 traffic to the backends of the Backend @@ -6022,14 +6022,16 @@ type BackendService struct { // another. For more information, refer to Choosing a load balancer. // // Possible values: - // "EXTERNAL" - Signifies that this will be used for external HTTP(S), - // SSL Proxy, TCP Proxy, or Network Load Balancing - // "EXTERNAL_MANAGED" - Signifies that this will be used for External - // Managed HTTP(S) Load Balancing. - // "INTERNAL" - Signifies that this will be used for Internal TCP/UDP - // Load Balancing. 
- // "INTERNAL_MANAGED" - Signifies that this will be used for Internal - // HTTP(S) Load Balancing. + // "EXTERNAL" - Signifies that this will be used for classic + // Application Load Balancers, global external proxy Network Load + // Balancers, or external passthrough Network Load Balancers. + // "EXTERNAL_MANAGED" - Signifies that this will be used for global + // external Application Load Balancers, regional external Application + // Load Balancers, or regional external proxy Network Load Balancers. + // "INTERNAL" - Signifies that this will be used for internal + // passthrough Network Load Balancers. + // "INTERNAL_MANAGED" - Signifies that this will be used for internal + // Application Load Balancers. // "INTERNAL_SELF_MANAGED" - Signifies that this will be used by // Traffic Director. // "INVALID_LOAD_BALANCING_SCHEME" @@ -6171,16 +6173,18 @@ type BackendService struct { OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the - // backend. The default value is 80. For Internal TCP/UDP Load Balancing - // and Network Load Balancing, omit port. + // backend. The default value is 80. For internal passthrough Network + // Load Balancers and external passthrough Network Load Balancers, omit + // port. Port int64 `json:"port,omitempty"` // PortName: A named port on a backend instance group representing the // port for communication to the backend VMs in that group. The named // port must be defined on each backend instance group // (https://cloud.google.com/load-balancing/docs/backend-service#named_ports). - // This parameter has no meaning if the backends are NEGs. For Internal - // TCP/UDP Load Balancing and Network Load Balancing, omit port_name. + // This parameter has no meaning if the backends are NEGs. For internal + // passthrough Network Load Balancers and external passthrough Network + // Load Balancers, omit port_name. PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with @@ -6769,18 +6773,19 @@ type BackendServiceConnectionTrackingPolicy struct { // "NEVER_PERSIST" ConnectionPersistenceOnUnhealthyBackends string `json:"connectionPersistenceOnUnhealthyBackends,omitempty"` - // EnableStrongAffinity: Enable Strong Session Affinity for Network Load - // Balancing. This option is not available publicly. + // EnableStrongAffinity: Enable Strong Session Affinity for external + // passthrough Network Load Balancers. This option is not available + // publicly. EnableStrongAffinity bool `json:"enableStrongAffinity,omitempty"` // IdleTimeoutSec: Specifies how long to keep a Connection Tracking - // entry while there is no matching traffic (in seconds). For Internal - // TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the - // maximum is 16 hours. - It can be set only if Connection Tracking is - // less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, - // CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For - // Network Load Balancer the default is 60 seconds. This option is not - // available publicly. + // entry while there is no matching traffic (in seconds). For internal + // passthrough Network Load Balancers: - The minimum (default) is 10 + // minutes and the maximum is 16 hours. - It can be set only if + // Connection Tracking is less than 5-tuple (i.e. Session Affinity is + // CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking + // Mode is PER_SESSION). 
For external passthrough Network Load Balancers + // the default is 60 seconds. This option is not available publicly. IdleTimeoutSec int64 `json:"idleTimeoutSec,omitempty"` // TrackingMode: Specifies the key used for connection tracking. There @@ -6826,9 +6831,9 @@ func (s *BackendServiceConnectionTrackingPolicy) MarshalJSON() ([]byte, error) { } // BackendServiceFailoverPolicy: For load balancers that have -// configurable failover: Internal TCP/UDP Load Balancing +// configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) -// and external TCP/UDP Load Balancing +// and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // On failover or failback, this field indicates whether connection // draining will be honored. Google Cloud has a fixed connection @@ -6847,9 +6852,9 @@ type BackendServiceFailoverPolicy struct { // unhealthy.If set to false, connections are distributed among all // primary VMs when all primary and all backup backend VMs are // unhealthy. For load balancers that have configurable failover: - // Internal TCP/UDP Load Balancing + // Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // The default is false. DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` @@ -13944,10 +13949,10 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // * Regional // (https://cloud.google.com/compute/docs/reference/rest/beta/forwardingRules) // A forwarding rule and its corresponding IP address represent the -// frontend configuration of a Google Cloud Platform load balancer. -// Forwarding rules can also reference target instances and Cloud VPN -// Classic gateways (targetVpnGateway). For more information, read -// Forwarding rule concepts and Using protocol forwarding. +// frontend configuration of a Google Cloud load balancer. Forwarding +// rules can also reference target instances and Cloud VPN Classic +// gateways (targetVpnGateway). For more information, read Forwarding +// rule concepts and Using protocol forwarding. type ForwardingRule struct { // IPAddress: IP address for which this forwarding rule accepts traffic. // When a client sends traffic to this IP address, the forwarding rule @@ -14011,8 +14016,9 @@ type ForwardingRule struct { // AllowGlobalAccess: This field is used along with the backend_service // field for internal load balancing or with the target field for // internal TargetInstance. If set to true, clients can access the - // Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load - // Balancer from all regions. If false, only allows access from the + // internal passthrough Network Load Balancers, the regional internal + // Application Load Balancer, and the regional internal proxy Network + // Load Balancer from all regions. If false, only allows access from the // local region the load balancer is located at. Note that for // INTERNAL_MANAGED forwarding rules, this field cannot be changed after // the forwarding rule is created. 
@@ -14030,16 +14036,16 @@ type ForwardingRule struct { AllowPscPacketInjection bool `json:"allowPscPacketInjection,omitempty"` // BackendService: Identifies the backend service to which the - // forwarding rule sends traffic. Required for Internal TCP/UDP Load - // Balancing and Network Load Balancing; must be omitted for all other + // forwarding rule sends traffic. Required for internal and external + // passthrough Network Load Balancers; must be omitted for all other // load balancer types. BackendService string `json:"backendService,omitempty"` // BaseForwardingRule: [Output Only] The URL for the corresponding base - // Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule + // forwarding rule. By base forwarding rule, we mean the forwarding rule // that has the same IP address, protocol, and port settings with the - // current Forwarding Rule, but without sourceIPRanges specified. Always - // empty if the current Forwarding Rule does not have sourceIPRanges + // current forwarding rule, but without sourceIPRanges specified. Always + // empty if the current forwarding rule does not have sourceIPRanges // specified. BaseForwardingRule string `json:"baseForwardingRule,omitempty"` @@ -14082,7 +14088,7 @@ type ForwardingRule struct { IsMirroringCollector bool `json:"isMirroringCollector,omitempty"` // Kind: [Output Only] Type of the resource. Always - // compute#forwardingRule for Forwarding Rule resources. + // compute#forwardingRule for forwarding rule resources. Kind string `json:"kind,omitempty"` // LabelFingerprint: A fingerprint for the labels being applied to this @@ -14145,10 +14151,10 @@ type ForwardingRule struct { Name string `json:"name,omitempty"` // Network: This field is not used for global external load balancing. - // For Internal TCP/UDP Load Balancing, this field identifies the - // network that the load balanced IP should belong to for this - // Forwarding Rule. If the subnetwork is specified, the network of the - // subnetwork will be used. If neither subnetwork nor this field is + // For internal passthrough Network Load Balancers, this field + // identifies the network that the load balanced IP should belong to for + // this forwarding rule. If the subnetwork is specified, the network of + // the subnetwork will be used. If neither subnetwork nor this field is // specified, the default network will be used. For Private Service // Connect forwarding rules that forward traffic to Google APIs, a // network must be provided. @@ -14215,7 +14221,7 @@ type ForwardingRule struct { Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC - // Forwarding Rule. + // forwarding rule. PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` // Possible values: @@ -14245,7 +14251,7 @@ type ForwardingRule struct { ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"` // ServiceLabel: An optional prefix to the service name for this - // Forwarding Rule. If specified, the prefix is the first label of the + // forwarding rule. If specified, the prefix is the first label of the // fully qualified service name. The label must be 1-63 characters long, // and comply with RFC1035. 
Specifically, the label must be 1-63 // characters long and match the regular expression @@ -14256,25 +14262,26 @@ type ForwardingRule struct { ServiceLabel string `json:"serviceLabel,omitempty"` // ServiceName: [Output Only] The internal fully qualified service name - // for this Forwarding Rule. This field is only used for internal load + // for this forwarding rule. This field is only used for internal load // balancing. ServiceName string `json:"serviceName,omitempty"` - // SourceIpRanges: If not empty, this Forwarding Rule will only forward + // SourceIpRanges: If not empty, this forwarding rule will only forward // the traffic when the source IP address matches one of the IP - // addresses or CIDR ranges set here. Note that a Forwarding Rule can + // addresses or CIDR ranges set here. Note that a forwarding rule can // only have up to 64 source IP ranges, and this field can only be used - // with a regional Forwarding Rule whose scheme is EXTERNAL. Each + // with a regional forwarding rule whose scheme is EXTERNAL. Each // source_ip_range entry should be either an IP address (for example, // 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). SourceIpRanges []string `json:"sourceIpRanges,omitempty"` // Subnetwork: This field identifies the subnetwork that the load - // balanced IP should belong to for this Forwarding Rule, used in - // internal load balancing and network load balancing with IPv6. If the - // network specified is in auto subnet mode, this field is optional. - // However, a subnetwork must be specified if the network is in custom - // subnet mode or when creating external forwarding rule with IPv6. + // balanced IP should belong to for this forwarding rule, used with + // internal load balancers and external passthrough Network Load + // Balancers with IPv6. If the network specified is in auto subnet mode, + // this field is optional. However, a subnetwork must be specified if + // the network is in custom subnet mode or when creating external + // forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched @@ -14735,9 +14742,9 @@ func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { } // ForwardingRuleServiceDirectoryRegistration: Describes the -// auto-registration of the Forwarding Rule to Service Directory. The +// auto-registration of the forwarding rule to Service Directory. The // region and project of the Service Directory resource generated from -// this registration will be the same as this Forwarding Rule. +// this registration will be the same as this forwarding rule. type ForwardingRuleServiceDirectoryRegistration struct { // Namespace: Service Directory namespace to register the forwarding // rule under. @@ -14749,8 +14756,8 @@ type ForwardingRuleServiceDirectoryRegistration struct { // ServiceDirectoryRegion: [Optional] Service Directory region to // register this global forwarding rule under. Default to "us-central1". - // Only used for PSC for Google APIs. All PSC for Google APIs Forwarding - // Rules on the same network should use the same Service Directory + // Only used for PSC for Google APIs. All PSC for Google APIs forwarding + // rules on the same network should use the same Service Directory // region. 
ServiceDirectoryRegion string `json:"serviceDirectoryRegion,omitempty"` @@ -14950,16 +14957,17 @@ func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { type FutureReservation struct { // AutoCreatedReservationsDeleteTime: Future timestamp when the FR - // auto-created reservations will be deleted by GCE. Format of this - // field must be a valid + // auto-created reservations will be deleted by Compute Engine. Format + // of this field must be a valid // href="https://www.ietf.org/rfc/rfc3339.txt">RFC3339 value. AutoCreatedReservationsDeleteTime string `json:"autoCreatedReservationsDeleteTime,omitempty"` // AutoCreatedReservationsDuration: Specifies the duration of // auto-created reservations. It represents relative time to future // reservation start_time when auto-created reservations will be - // automatically deleted by GCE. Duration time unit is represented as a - // count of seconds and fractions of seconds at nanosecond resolution. + // automatically deleted by Compute Engine. Duration time unit is + // represented as a count of seconds and fractions of seconds at + // nanosecond resolution. AutoCreatedReservationsDuration *Duration `json:"autoCreatedReservationsDuration,omitempty"` // AutoDeleteAutoCreatedReservations: Setting for enabling or disabling @@ -15958,7 +15966,7 @@ type GRPCHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -15968,7 +15976,7 @@ type GRPCHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -16398,7 +16406,7 @@ type HTTP2HealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -16408,7 +16416,7 @@ type HTTP2HealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. 
Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -16492,7 +16500,7 @@ type HTTPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Also supported in // legacy HTTP health checks for target pools. The health check supports // all backends supported by the backend service provided the backend @@ -16586,7 +16594,7 @@ type HTTPSHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -16596,7 +16604,7 @@ type HTTPSHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -16666,20 +16674,14 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // (/compute/docs/reference/rest/beta/regionHealthChecks) * Global // (/compute/docs/reference/rest/beta/healthChecks) These health check // resources can be used for load balancing and for autohealing VMs in a -// managed instance group (MIG). **Load balancing** The following load -// balancer can use either regional or global health check: * Internal -// TCP/UDP load balancer The following load balancers require regional -// health check: * Internal HTTP(S) load balancer * Backend -// service-based network load balancer Traffic Director and the -// following load balancers require global health check: * External -// HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load -// balancer The following load balancer require legacy HTTP health -// checks (/compute/docs/reference/rest/v1/httpHealthChecks): * Target -// pool-based network load balancer **Autohealing in MIGs** The health -// checks that you use for autohealing VMs in a MIG can be either -// regional or global. For more information, see Set up an application -// health check and autohealing. For more information, see Health checks -// overview. +// managed instance group (MIG). **Load balancing** Health check +// requirements vary depending on the type of load balancer. 
For details +// about the type of health check supported for each load balancer and +// corresponding backend type, see Health checks overview: Load balancer +// guide. **Autohealing in MIGs** The health checks that you use for +// autohealing VMs in a MIG can be either regional or global. For more +// information, see Set up an application health check and autohealing. +// For more information, see Health checks overview. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -20242,6 +20244,9 @@ type Instance struct { // corresponding input only field. ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: [Output Only] Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -21671,6 +21676,10 @@ type InstanceGroupManagerInstanceFlexibilityPolicy struct { // properties that the group will use when creating new VMs. InstanceSelectionLists map[string]InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection `json:"instanceSelectionLists,omitempty"` + // InstanceSelections: Named instance selections configuring properties + // that the group will use when creating new VMs. + InstanceSelections map[string]InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection `json:"instanceSelections,omitempty"` + // ForceSendFields is a list of field names (e.g. // "InstanceSelectionLists") to unconditionally include in API requests. // By default, fields with empty or default values are omitted from API @@ -22403,14 +22412,15 @@ func (s *InstanceGroupManagerResizeRequestsListResponseWarningData) MarshalJSON( type InstanceGroupManagerStandbyPolicy struct { InitialDelaySec int64 `json:"initialDelaySec,omitempty"` - // Mode: Defines behaviour of using instances from standby pool to - // resize MIG. + // Mode: Defines how a MIG resumes or starts VMs from a standby pool + // when the group scales out. The default mode is `MANUAL`. // // Possible values: - // "MANUAL" - MIG does not automatically stop/start or suspend/resume - // VMs. - // "SCALE_OUT_POOL" - MIG automatically resumes and starts VMs when it - // scales out, and replenishes the standby pool afterwards. + // "MANUAL" - MIG does not automatically resume or start VMs in the + // standby pool when the group scales out. + // "SCALE_OUT_POOL" - MIG automatically resumes or starts VMs in the + // standby pool when the group scales out, and replenishes the standby + // pool afterwards. Mode string `json:"mode,omitempty"` // ForceSendFields is a list of field names (e.g. "InitialDelaySec") to @@ -43364,6 +43374,9 @@ type Quota struct { // "PREEMPTIBLE_NVIDIA_T4_GPUS" // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" // "PREEMPTIBLE_NVIDIA_V100_GPUS" + // "PREEMPTIBLE_TPU_LITE_DEVICE_V5" + // "PREEMPTIBLE_TPU_LITE_PODSLICE_V5" + // "PREEMPTIBLE_TPU_PODSLICE_V4" // "PRIVATE_V6_ACCESS_SUBNETWORKS" // "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK" // "PSC_INTERNAL_LB_FORWARDING_RULES" @@ -43403,6 +43416,9 @@ type Quota struct { // "TARGET_SSL_PROXIES" // "TARGET_TCP_PROXIES" // "TARGET_VPN_GATEWAYS" + // "TPU_LITE_DEVICE_V5" + // "TPU_LITE_PODSLICE_V5" + // "TPU_PODSLICE_V4" // "URL_MAPS" // "VPN_GATEWAYS" // "VPN_TUNNELS" @@ -50695,7 +50711,7 @@ type SSLHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. 
Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -50705,7 +50721,7 @@ type SSLHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -51932,10 +51948,20 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfi AutoDeployLoadThreshold float64 `json:"autoDeployLoadThreshold,omitempty"` + DetectionAbsoluteQps float64 `json:"detectionAbsoluteQps,omitempty"` + + DetectionLoadThreshold float64 `json:"detectionLoadThreshold,omitempty"` + + DetectionRelativeToBaselineQps float64 `json:"detectionRelativeToBaselineQps,omitempty"` + // Name: The name must be 1-63 characters long, and comply with RFC1035. // The name must be unique within the security policy. Name string `json:"name,omitempty"` + // TrafficGranularityConfigs: Configuration options for enabling + // Adaptive Protection to operate on specified granular traffic units. + TrafficGranularityConfigs []*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig `json:"trafficGranularityConfigs,omitempty"` + // ForceSendFields is a list of field names (e.g. // "AutoDeployConfidenceThreshold") to unconditionally include in API // requests. 
By default, fields with empty or default values are omitted @@ -51967,6 +51993,9 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC AutoDeployConfidenceThreshold gensupport.JSONFloat64 `json:"autoDeployConfidenceThreshold"` AutoDeployImpactedBaselineThreshold gensupport.JSONFloat64 `json:"autoDeployImpactedBaselineThreshold"` AutoDeployLoadThreshold gensupport.JSONFloat64 `json:"autoDeployLoadThreshold"` + DetectionAbsoluteQps gensupport.JSONFloat64 `json:"detectionAbsoluteQps"` + DetectionLoadThreshold gensupport.JSONFloat64 `json:"detectionLoadThreshold"` + DetectionRelativeToBaselineQps gensupport.JSONFloat64 `json:"detectionRelativeToBaselineQps"` *NoMethod } s1.NoMethod = (*NoMethod)(s) @@ -51976,9 +52005,58 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC s.AutoDeployConfidenceThreshold = float64(s1.AutoDeployConfidenceThreshold) s.AutoDeployImpactedBaselineThreshold = float64(s1.AutoDeployImpactedBaselineThreshold) s.AutoDeployLoadThreshold = float64(s1.AutoDeployLoadThreshold) + s.DetectionAbsoluteQps = float64(s1.DetectionAbsoluteQps) + s.DetectionLoadThreshold = float64(s1.DetectionLoadThreshold) + s.DetectionRelativeToBaselineQps = float64(s1.DetectionRelativeToBaselineQps) return nil } +// SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThreshold +// ConfigTrafficGranularityConfig: Configurations to specifc granular +// traffic units processed by Adaptive Protection. +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig struct { + // EnableEachUniqueValue: If enabled, traffic matching each unique value + // for the specified type constitutes a separate traffic unit. It can + // only be set to true if `value` is empty. + EnableEachUniqueValue bool `json:"enableEachUniqueValue,omitempty"` + + // Type: Type of this configuration. + // + // Possible values: + // "HTTP_HEADER_HOST" + // "HTTP_PATH" + // "UNSPECIFIED_TYPE" + Type string `json:"type,omitempty"` + + // Value: Requests that match this value constitute a granular traffic + // unit. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EnableEachUniqueValue") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnableEachUniqueValue") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyAdvancedOptionsConfig struct { // JsonCustomConfig: Custom configuration to apply the JSON parsing. // Only applicable when json_parsing is set to STANDARD. @@ -53042,7 +53120,13 @@ type SecurityPolicyRuleRateLimitOptions struct { // Server name indication in the TLS session of the HTTPS request. The // key value is truncated to the first 128 bytes. The key type defaults // to ALL on a HTTP session. - REGION_CODE: The country/region from - // which the request originates. + // which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL + // fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If + // not available, the key type defaults to ALL. - USER_IP: The IP + // address of the originating client, which is resolved based on + // "userIpRequestHeaders" configured with the security policy. If there + // is no "userIpRequestHeaders" configuration or an IP address cannot be + // resolved from it, the key type defaults to IP. // // Possible values: // "ALL" @@ -53053,6 +53137,8 @@ type SecurityPolicyRuleRateLimitOptions struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` @@ -53139,7 +53225,14 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // bytes. - SNI: Server name indication in the TLS session of the HTTPS // request. The key value is truncated to the first 128 bytes. The key // type defaults to ALL on a HTTP session. - REGION_CODE: The - // country/region from which the request originates. + // country/region from which the request originates. - + // TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects + // using HTTPS, HTTP/2 or HTTP/3. If not available, the key type + // defaults to ALL. - USER_IP: The IP address of the originating client, + // which is resolved based on "userIpRequestHeaders" configured with the + // security policy. If there is no "userIpRequestHeaders" configuration + // or an IP address cannot be resolved from it, the key type defaults to + // IP. // // Possible values: // "ALL" @@ -53150,6 +53243,8 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` @@ -56873,9 +56968,9 @@ func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { } // SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. +// control SSL features, such as versions and cipher suites, that are +// offered by Application Load Balancers and proxy Network Load +// Balancers. For more information, read SSL policies overview. type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -58401,7 +58496,7 @@ type TCPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. 
Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -58411,7 +58506,7 @@ type TCPHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -60984,10 +61079,10 @@ func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { } // TargetPool: Represents a Target Pool resource. Target pools are used -// for network TCP/UDP load balancing. A target pool references member -// instances, an associated legacy HttpHealthCheck resource, and, -// optionally, a backup target pool. For more information, read Using -// target pools. +// with external passthrough Network Load Balancers. A target pool +// references member instances, an associated legacy HttpHealthCheck +// resource, and, optionally, a backup target pool. For more +// information, read Using target pools. type TargetPool struct { // BackupPool: The server-defined URL for the resource. This field is // applicable only when the containing target pool is serving a @@ -62003,10 +62098,10 @@ func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error } // TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL -// proxy is a component of a SSL Proxy load balancer. Global forwarding -// rules reference a target SSL proxy, and the target proxy then -// references an external backend service. For more information, read -// Using Target Proxies. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target SSL proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetSslProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field @@ -62520,10 +62615,10 @@ func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { } // TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP -// proxy is a component of a TCP Proxy load balancer. Global forwarding -// rules reference target TCP proxy, and the target proxy then -// references an external backend service. For more information, read -// TCP Proxy Load Balancing overview. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target TCP proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
@@ -106823,8 +106918,8 @@ type InstanceGroupManagersListManagedInstancesCall struct { // instance, the currentAction is CREATING. If a previous action failed, // the list displays the errors for that failed action. The orderBy // query parameter is not supported. The `pageToken` query parameter is -// supported only in the alpha and beta API and only if the group's -// `listManagedInstancesResults` field is set to `PAGINATED`. +// supported only if the group's `listManagedInstancesResults` field is +// set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -107010,7 +107105,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", @@ -122451,9 +122546,11 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -122574,7 +122671,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. 
If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -122649,9 +122746,11 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -122772,7 +122871,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -172611,9 +172710,8 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its // instances. The orderBy query parameter is not supported. The -// `pageToken` query parameter is supported only in the alpha and beta -// API and only if the group's `listManagedInstancesResults` field is -// set to `PAGINATED`. +// `pageToken` query parameter is supported only if the group's +// `listManagedInstancesResults` field is set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -172798,7 +172896,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -226187,8 +226285,8 @@ type TargetSslProxiesSetSslPolicyCall struct { // SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy // specifies the server-side support for SSL features. 
This affects -// connections between clients and the SSL proxy load balancer. They do -// not affect the connection between the load balancer and the backends. +// connections between clients and the load balancer. They do not affect +// the connection between the load balancer and the backends. // // - project: Project ID for this request. // - targetSslProxy: Name of the TargetSslProxy resource whose SSL @@ -226310,7 +226408,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. They do not affect the connection between the load balancer and the backends.", // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslPolicy", diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 2411236d40..0ba58f5647 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -8066,7 +8066,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", @@ -11374,7 +11374,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -11426,7 +11426,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. 
Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -21869,7 +21869,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -26547,6 +26547,73 @@ } } }, + "regionZones": { + "methods": { + "list": { + "description": "Retrieves the list of Zone resources under the specific region available to the specified project.", + "flatPath": "projects/{project}/regions/{region}/zones", + "httpMethod": "GET", + "id": "compute.regionZones.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/zones", + "response": { + "$ref": "ZoneList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "regions": { "methods": { "get": { @@ -33196,7 +33263,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. 
They do not affect the connection between the load balancer and the backends.", "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslPolicy", @@ -35267,7 +35334,7 @@ } } }, - "revision": "20231031", + "revision": "20231128", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -36528,6 +36595,80 @@ }, "type": "object" }, + "AllocationAggregateReservation": { + "description": "This reservation type is specified by total resource amounts (e.g. total count of CPUs) and can account for multiple instance SKUs. In other words, one can create instances of varying shapes against this reservation.", + "id": "AllocationAggregateReservation", + "properties": { + "inUseResources": { + "description": "[Output only] List of resources currently in use.", + "items": { + "$ref": "AllocationAggregateReservationReservedResourceInfo" + }, + "type": "array" + }, + "reservedResources": { + "description": "List of reserved resources (CPUs, memory, accelerators).", + "items": { + "$ref": "AllocationAggregateReservationReservedResourceInfo" + }, + "type": "array" + }, + "vmFamily": { + "description": "The VM family that all instances scheduled against this reservation must belong to.", + "enum": [ + "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L", + "VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP", + "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "workloadType": { + "description": "The workload type of the instances that will target this reservation.", + "enum": [ + "BATCH", + "SERVING", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "Reserved resources will be optimized for BATCH workloads, such as ML training.", + "Reserved resources will be optimized for SERVING workloads, such as ML inference.", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AllocationAggregateReservationReservedResourceInfo": { + "id": "AllocationAggregateReservationReservedResourceInfo", + "properties": { + "accelerator": { + "$ref": "AllocationAggregateReservationReservedResourceInfoAccelerator", + "description": "Properties of accelerator resources in this reservation." + } + }, + "type": "object" + }, + "AllocationAggregateReservationReservedResourceInfoAccelerator": { + "id": "AllocationAggregateReservationReservedResourceInfoAccelerator", + "properties": { + "acceleratorCount": { + "description": "Number of accelerators of specified type.", + "format": "int32", + "type": "integer" + }, + "acceleratorType": { + "description": "Full or partial URL to accelerator type. e.g. \"projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l\"", + "type": "string" + } + }, + "type": "object" + }, "AllocationResourceStatus": { "description": "[Output Only] Contains output only fields.", "id": "AllocationResourceStatus", @@ -36801,6 +36942,10 @@ "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. 
For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. For example: pd-standard.", "type": "string" }, + "enableConfidentialCompute": { + "description": "Whether this disk is using confidential compute mode.", + "type": "boolean" + }, "labels": { "additionalProperties": { "type": "string" @@ -38172,7 +38317,7 @@ "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Lifetime of cookies in seconds. This setting is applicable to Application Load Balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -38207,7 +38352,7 @@ }, "connectionTrackingPolicy": { "$ref": "BackendServiceConnectionTrackingPolicy", - "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing." + "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for external passthrough Network Load Balancers and internal passthrough Network Load Balancers." }, "consistentHash": { "$ref": "ConsistentHashLoadBalancerSettings", @@ -38240,12 +38385,12 @@ "type": "string" }, "enableCDN": { - "description": "If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.", + "description": "If true, enables Cloud CDN for the backend service of a global external Application Load Balancer.", "type": "boolean" }, "failoverPolicy": { "$ref": "BackendServiceFailoverPolicy", - "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." + "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." 
}, "fingerprint": { "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a BackendService.", @@ -38261,7 +38406,7 @@ }, "iap": { "$ref": "BackendServiceIAP", - "description": "The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing." + "description": "The configurations for Identity-Aware Proxy on this resource. Not available for internal passthrough Network Load Balancers and external passthrough Network Load Balancers." }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -38284,10 +38429,10 @@ "INVALID_LOAD_BALANCING_SCHEME" ], "enumDescriptions": [ - "Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing", - "Signifies that this will be used for External Managed HTTP(S) Load Balancing.", - "Signifies that this will be used for Internal TCP/UDP Load Balancing.", - "Signifies that this will be used for Internal HTTP(S) Load Balancing.", + "Signifies that this will be used for classic Application Load Balancers, global external proxy Network Load Balancers, or external passthrough Network Load Balancers.", + "Signifies that this will be used for global external Application Load Balancers, regional external Application Load Balancers, or regional external proxy Network Load Balancers.", + "Signifies that this will be used for internal passthrough Network Load Balancers.", + "Signifies that this will be used for internal Application Load Balancers.", "Signifies that this will be used by Traffic Director.", "" ], @@ -38354,12 +38499,12 @@ }, "port": { "deprecated": true, - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port.", "format": "int32", "type": "integer" }, "portName": { - "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.", + "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. 
For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port_name.", "type": "string" }, "protocol": { @@ -38738,11 +38883,11 @@ "type": "string" }, "enableStrongAffinity": { - "description": "Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly.", + "description": "Enable Strong Session Affinity for external passthrough Network Load Balancers. This option is not available publicly.", "type": "boolean" }, "idleTimeoutSec": { - "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.", + "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For internal passthrough Network Load Balancers: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For external passthrough Network Load Balancers the default is 60 seconds. This option is not available publicly.", "format": "int32", "type": "integer" }, @@ -38764,7 +38909,7 @@ "type": "object" }, "BackendServiceFailoverPolicy": { - "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", + "description": "For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. 
A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", "id": "BackendServiceFailoverPolicy", "properties": { "disableConnectionDrainOnFailover": { @@ -38772,7 +38917,7 @@ "type": "boolean" }, "dropTrafficIfUnhealthy": { - "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", + "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", "type": "boolean" }, "failoverRatio": { @@ -40783,6 +40928,10 @@ "$ref": "CustomerEncryptionKey", "description": "Encrypts the disk using a customer-supplied encryption key or a customer-managed encryption key. Encryption keys do not protect access to metadata of the disk. After you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later. For example, to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine. After you encrypt a disk with a customer-managed key, the diskEncryptionKey.kmsKeyName is set to a key *version* name once the disk is created. The disk is encrypted with this version of the key. In the response, diskEncryptionKey.kmsKeyName appears in the following format: \"diskEncryptionKey.kmsKeyName\": \"projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeysVersions/version If you do not provide an encryption key when creating the disk, then the disk is encrypted using an automatically generated key and you don't need to provide a key to use the disk later." }, + "enableConfidentialCompute": { + "description": "Whether this disk is using confidential compute mode.", + "type": "boolean" + }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { @@ -43535,7 +43684,7 @@ "type": "object" }, "ForwardingRule": { - "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. 
Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", + "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", "id": "ForwardingRule", "properties": { "IPAddress": { @@ -43569,7 +43718,7 @@ "type": "boolean" }, "allowGlobalAccess": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the internal passthrough Network Load Balancers, the regional internal Application Load Balancer, and the regional internal proxy Network Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", "type": "boolean" }, "allowPscGlobalAccess": { @@ -43577,11 +43726,11 @@ "type": "boolean" }, "backendService": { - "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for internal and external passthrough Network Load Balancers; must be omitted for all other load balancer types.", "type": "string" }, "baseForwardingRule": { - "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + "description": "[Output Only] The URL for the corresponding base forwarding rule. By base forwarding rule, we mean the forwarding rule that has the same IP address, protocol, and port settings with the current forwarding rule, but without sourceIPRanges specified. Always empty if the current forwarding rule does not have sourceIPRanges specified.", "type": "string" }, "creationTimestamp": { @@ -43622,7 +43771,7 @@ }, "kind": { "default": "compute#forwardingRule", - "description": "[Output Only] Type of the resource. 
Always compute#forwardingRule for Forwarding Rule resources.", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for forwarding rule resources.", "type": "string" }, "labelFingerprint": { @@ -43670,7 +43819,7 @@ "type": "string" }, "network": { - "description": "This field is not used for global external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for global external load balancing. For internal passthrough Network Load Balancers, this field identifies the network that the load balanced IP should belong to for this forwarding rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -43705,7 +43854,7 @@ "type": "array" }, "pscConnectionId": { - "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", + "description": "[Output Only] The PSC connection id of the PSC forwarding rule.", "format": "uint64", "type": "string" }, @@ -43744,23 +43893,23 @@ "type": "array" }, "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", + "description": "An optional prefix to the service name for this forwarding rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "serviceName": { - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", + "description": "[Output Only] The internal fully qualified service name for this forwarding rule. This field is only used for internal load balancing.", "type": "string" }, "sourceIpRanges": { - "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. 
Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + "description": "If not empty, this forwarding rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a forwarding rule can only have up to 64 source IP ranges, and this field can only be used with a regional forwarding rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", "items": { "type": "string" }, "type": "array" }, "subnetwork": { - "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", + "description": "This field identifies the subnetwork that the load balanced IP should belong to for this forwarding rule, used with internal load balancers and external passthrough Network Load Balancers with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", "type": "string" }, "target": { @@ -44091,7 +44240,7 @@ "type": "object" }, "ForwardingRuleServiceDirectoryRegistration": { - "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "description": "Describes the auto-registration of the forwarding rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this forwarding rule.", "id": "ForwardingRuleServiceDirectoryRegistration", "properties": { "namespace": { @@ -44103,7 +44252,7 @@ "type": "string" }, "serviceDirectoryRegion": { - "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", + "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs forwarding rules on the same network should use the same Service Directory region.", "type": "string" } }, @@ -44260,7 +44409,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. 
For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -44496,7 +44645,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -44549,7 +44698,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. 
For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -44602,7 +44751,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -44639,7 +44788,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) * [Global](/compute/docs/reference/rest/v1/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** The following load balancer can use either regional or global health check: * Internal TCP/UDP load balancer The following load balancers require regional health check: * Internal HTTP(S) load balancer * Backend service-based network load balancer Traffic Director and the following load balancers require global health check: * External HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load balancer The following load balancer require [legacy HTTP health checks](/compute/docs/reference/rest/v1/httpHealthChecks): * Target pool-based network load balancer **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", + "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) * [Global](/compute/docs/reference/rest/v1/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** Health check requirements vary depending on the type of load balancer. For details about the type of health check supported for each load balancer and corresponding backend type, see Health checks overview: Load balancer guide. **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -46434,6 +46583,10 @@ "format": "int64", "type": "string" }, + "enableConfidentialCompute": { + "description": "Whether this image is created from a confidential compute mode disk. [Output Only]: This field is not set by user, but from source disk.", + "type": "boolean" + }, "family": { "description": "The name of the image family to which this image belongs. The image family name can be from a publicly managed image family provided by Compute Engine, or from a custom image family you create. For example, centos-stream-9 is a publicly available image family. For more information, see Image family best practices. When creating disks, you can specify an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", "type": "string" @@ -46976,6 +47129,10 @@ "$ref": "ResourceStatus", "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." 
}, + "satisfiesPzi": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -62017,6 +62174,9 @@ "PREEMPTIBLE_NVIDIA_T4_GPUS", "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS", "PREEMPTIBLE_NVIDIA_V100_GPUS", + "PREEMPTIBLE_TPU_LITE_DEVICE_V5", + "PREEMPTIBLE_TPU_LITE_PODSLICE_V5", + "PREEMPTIBLE_TPU_PODSLICE_V4", "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK", "PSC_INTERNAL_LB_FORWARDING_RULES", "PUBLIC_ADVERTISED_PREFIXES", @@ -62054,6 +62214,9 @@ "TARGET_SSL_PROXIES", "TARGET_TCP_PROXIES", "TARGET_VPN_GATEWAYS", + "TPU_LITE_DEVICE_V5", + "TPU_LITE_PODSLICE_V5", + "TPU_PODSLICE_V4", "URL_MAPS", "VPN_GATEWAYS", "VPN_TUNNELS", @@ -62189,6 +62352,9 @@ "", "", "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -62208,6 +62374,9 @@ "", "", "", + "", + "", + "", "" ], "type": "string" @@ -63881,6 +64050,10 @@ "description": "Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources.", "id": "Reservation", "properties": { + "aggregateReservation": { + "$ref": "AllocationAggregateReservation", + "description": "Reservation for aggregated resources, providing shape flexibility." + }, "commitment": { "description": "[Output Only] Full or partial URL to a parent commitment. This field displays for reservations that are tied to a commitment.", "type": "string" @@ -67246,7 +67419,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. 
USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -68399,6 +68572,10 @@ "$ref": "Expr", "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." }, + "exprOptions": { + "$ref": "SecurityPolicyRuleMatcherExprOptions", + "description": "The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr')." + }, "versionedExpr": { "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", "enum": [ @@ -68425,6 +68602,36 @@ }, "type": "object" }, + "SecurityPolicyRuleMatcherExprOptions": { + "id": "SecurityPolicyRuleMatcherExprOptions", + "properties": { + "recaptchaOptions": { + "$ref": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", + "description": "reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field will have no effect." + } + }, + "type": "object" + }, + "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions": { + "id": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", + "properties": { + "actionTokenSiteKeys": { + "description": "A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sessionTokenSiteKeys": { + "description": "A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "SecurityPolicyRuleNetworkMatcher": { "description": "Represents a match condition that incoming network traffic is evaluated against.", "id": "SecurityPolicyRuleNetworkMatcher", @@ -68609,7 +68816,7 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. 
This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "HTTP_COOKIE", @@ -68618,6 +68825,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDescriptions": [ @@ -68628,6 +68837,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -68666,7 +68877,7 @@ "type": "string" }, "enforceOnKeyType": { - "description": "Determines the key to enforce the rate_limit_threshold on. 
Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. 
", "enum": [ "ALL", "HTTP_COOKIE", @@ -68675,6 +68886,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDescriptions": [ @@ -68685,6 +68898,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -69699,6 +69914,10 @@ "format": "int64", "type": "string" }, + "enableConfidentialCompute": { + "description": "Whether this snapshot is created from a confidential compute mode disk. [Output Only]: This field is not set by user, but from source disk.", + "type": "boolean" + }, "guestOsFeatures": { "description": "[Output Only] A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { @@ -71227,7 +71446,7 @@ "type": "object" }, "SslPolicy": { - "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", + "description": "Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview.", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -72288,7 +72507,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. 
Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -74018,7 +74237,7 @@ "type": "object" }, "TargetPool": { - "description": "Represents a Target Pool resource. Target pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", + "description": "Represents a Target Pool resource. Target pools are used with external passthrough Network Load Balancers. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", "id": "TargetPool", "properties": { "backupPool": { @@ -74681,7 +74900,7 @@ "type": "object" }, "TargetSslProxy": { - "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", + "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetSslProxy", "properties": { "certificateMap": { @@ -75060,7 +75279,7 @@ "type": "object" }, "TargetTcpProxy": { - "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.", + "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target TCP proxy, and the target proxy then references a backend service. 
For more information, read Proxy Network Load Balancer overview.",
 "id": "TargetTcpProxy",
 "properties": {
 "creationTimestamp": {
diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go
index 2f9af3b44f..407df46302 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go
@@ -227,6 +227,7 @@ func New(client *http.Client) (*Service, error) {
 	s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s)
 	s.RegionTargetTcpProxies = NewRegionTargetTcpProxiesService(s)
 	s.RegionUrlMaps = NewRegionUrlMapsService(s)
+	s.RegionZones = NewRegionZonesService(s)
 	s.Regions = NewRegionsService(s)
 	s.Reservations = NewReservationsService(s)
 	s.ResourcePolicies = NewResourcePoliciesService(s)
@@ -396,6 +397,8 @@ type Service struct {
 	RegionUrlMaps *RegionUrlMapsService
 
+	RegionZones *RegionZonesService
+
 	Regions *RegionsService
 
 	Reservations *ReservationsService
@@ -1066,6 +1069,15 @@ type RegionUrlMapsService struct {
 	s *Service
 }
 
+func NewRegionZonesService(s *Service) *RegionZonesService {
+	rs := &RegionZonesService{s: s}
+	return rs
+}
+
+type RegionZonesService struct {
+	s *Service
+}
+
 func NewRegionsService(s *Service) *RegionsService {
 	rs := &RegionsService{s: s}
 	return rs
@@ -2968,6 +2980,121 @@ func (s *AliasIpRange) MarshalJSON() ([]byte, error) {
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
+// AllocationAggregateReservation: This reservation type is specified by
+// total resource amounts (e.g. total count of CPUs) and can account for
+// multiple instance SKUs. In other words, one can create instances of
+// varying shapes against this reservation.
+type AllocationAggregateReservation struct {
+	// InUseResources: [Output only] List of resources currently in use.
+	InUseResources []*AllocationAggregateReservationReservedResourceInfo `json:"inUseResources,omitempty"`
+
+	// ReservedResources: List of reserved resources (CPUs, memory,
+	// accelerators).
+	ReservedResources []*AllocationAggregateReservationReservedResourceInfo `json:"reservedResources,omitempty"`
+
+	// VmFamily: The VM family that all instances scheduled against this
+	// reservation must belong to.
+	//
+	// Possible values:
+	//   "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L"
+	//   "VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP"
+	//   "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P"
+	VmFamily string `json:"vmFamily,omitempty"`
+
+	// WorkloadType: The workload type of the instances that will target
+	// this reservation.
+	//
+	// Possible values:
+	//   "BATCH" - Reserved resources will be optimized for BATCH workloads,
+	// such as ML training.
+	//   "SERVING" - Reserved resources will be optimized for SERVING
+	// workloads, such as ML inference.
+	//   "UNSPECIFIED"
+	WorkloadType string `json:"workloadType,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "InUseResources") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "InUseResources") to
+	// include in API requests with the JSON null value. By default, fields
+	// with empty values are omitted from API requests. However, any field
+	// with an empty value appearing in NullFields will be sent to the
+	// server as null. It is an error if a field in this list has a
+	// non-empty value. This may be used to include null fields in Patch
+	// requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *AllocationAggregateReservation) MarshalJSON() ([]byte, error) {
+	type NoMethod AllocationAggregateReservation
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type AllocationAggregateReservationReservedResourceInfo struct {
+	// Accelerator: Properties of accelerator resources in this reservation.
+	Accelerator *AllocationAggregateReservationReservedResourceInfoAccelerator `json:"accelerator,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "Accelerator") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "Accelerator") to include
+	// in API requests with the JSON null value. By default, fields with
+	// empty values are omitted from API requests. However, any field with
+	// an empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *AllocationAggregateReservationReservedResourceInfo) MarshalJSON() ([]byte, error) {
+	type NoMethod AllocationAggregateReservationReservedResourceInfo
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+type AllocationAggregateReservationReservedResourceInfoAccelerator struct {
+	// AcceleratorCount: Number of accelerators of specified type.
+	AcceleratorCount int64 `json:"acceleratorCount,omitempty"`
+
+	// AcceleratorType: Full or partial URL to accelerator type. e.g.
+	// "projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l"
+	AcceleratorType string `json:"acceleratorType,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "AcceleratorCount") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "AcceleratorCount") to
+	// include in API requests with the JSON null value. By default, fields
+	// with empty values are omitted from API requests. However, any field
+	// with an empty value appearing in NullFields will be sent to the
+	// server as null. It is an error if a field in this list has a
+	// non-empty value. This may be used to include null fields in Patch
+	// requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *AllocationAggregateReservationReservedResourceInfoAccelerator) MarshalJSON() ([]byte, error) {
+	type NoMethod AllocationAggregateReservationReservedResourceInfoAccelerator
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
 // AllocationResourceStatus: [Output Only] Contains output only fields.
 type AllocationResourceStatus struct {
 	// SpecificSkuAllocation: Allocation Properties of this reservation.
@@ -3375,6 +3502,10 @@ type AttachedDiskInitializeParams struct {
 	// example: pd-standard.
 	DiskType string `json:"diskType,omitempty"`
 
+	// EnableConfidentialCompute: Whether this disk is using confidential
+	// compute mode.
+	EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"`
+
 	// Labels: Labels to apply to this disk. These can be later modified by
 	// the disks.setLabels method. This field is only applicable for
 	// persistent disks.
@@ -5537,13 +5668,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) {
 // For more information, see Backend Services.
 type BackendService struct {
 	// AffinityCookieTtlSec: Lifetime of cookies in seconds. This setting is
-	// applicable to external and internal HTTP(S) load balancers and
-	// Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session
-	// affinity. If set to 0, the cookie is non-persistent and lasts only
-	// until the end of the browser session (or equivalent). The maximum
-	// allowed value is two weeks (1,209,600). Not supported when the
-	// backend service is referenced by a URL map that is bound to target
-	// gRPC proxy that has validateForProxyless field set to true.
+	// applicable to Application Load Balancers and Traffic Director and
+	// requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to
+	// 0, the cookie is non-persistent and lasts only until the end of the
+	// browser session (or equivalent). The maximum allowed value is two
+	// weeks (1,209,600). Not supported when the backend service is
+	// referenced by a URL map that is bound to target gRPC proxy that has
+	// validateForProxyless field set to true.
 	AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"`
 
 	// Backends: The list of backends that serve this BackendService.
@@ -5569,8 +5700,8 @@ type BackendService struct {
 
 	// ConnectionTrackingPolicy: Connection Tracking configuration for this
 	// BackendService. Connection tracking policy settings are only
-	// available for Network Load Balancing and Internal TCP/UDP Load
-	// Balancing.
+	// available for external passthrough Network Load Balancers and
+	// internal passthrough Network Load Balancers.
 	ConnectionTrackingPolicy *BackendServiceConnectionTrackingPolicy `json:"connectionTrackingPolicy,omitempty"`
 
 	// ConsistentHash: Consistent Hash-based load balancing can be used to
@@ -5609,15 +5740,15 @@ type BackendService struct {
 	// security policy associated with this backend service.
 	EdgeSecurityPolicy string `json:"edgeSecurityPolicy,omitempty"`
 
-	// EnableCDN: If true, enables Cloud CDN for the backend service of an
-	// external HTTP(S) load balancer.
+	// EnableCDN: If true, enables Cloud CDN for the backend service of a
+	// global external Application Load Balancer.
 	EnableCDN bool `json:"enableCDN,omitempty"`
 
 	// FailoverPolicy: Requires at least one backend instance group to be
 	// defined as a backup (failover) backend.
For load balancers that have - // configurable failover: Internal TCP/UDP Load Balancing + // configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` @@ -5641,8 +5772,8 @@ type BackendService struct { HealthChecks []string `json:"healthChecks,omitempty"` // Iap: The configurations for Identity-Aware Proxy on this resource. - // Not available for Internal TCP/UDP Load Balancing and Network Load - // Balancing. + // Not available for internal passthrough Network Load Balancers and + // external passthrough Network Load Balancers. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -5658,14 +5789,16 @@ type BackendService struct { // another. For more information, refer to Choosing a load balancer. // // Possible values: - // "EXTERNAL" - Signifies that this will be used for external HTTP(S), - // SSL Proxy, TCP Proxy, or Network Load Balancing - // "EXTERNAL_MANAGED" - Signifies that this will be used for External - // Managed HTTP(S) Load Balancing. - // "INTERNAL" - Signifies that this will be used for Internal TCP/UDP - // Load Balancing. - // "INTERNAL_MANAGED" - Signifies that this will be used for Internal - // HTTP(S) Load Balancing. + // "EXTERNAL" - Signifies that this will be used for classic + // Application Load Balancers, global external proxy Network Load + // Balancers, or external passthrough Network Load Balancers. + // "EXTERNAL_MANAGED" - Signifies that this will be used for global + // external Application Load Balancers, regional external Application + // Load Balancers, or regional external proxy Network Load Balancers. + // "INTERNAL" - Signifies that this will be used for internal + // passthrough Network Load Balancers. + // "INTERNAL_MANAGED" - Signifies that this will be used for internal + // Application Load Balancers. // "INTERNAL_SELF_MANAGED" - Signifies that this will be used by // Traffic Director. // "INVALID_LOAD_BALANCING_SCHEME" @@ -5807,16 +5940,18 @@ type BackendService struct { OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the - // backend. The default value is 80. For Internal TCP/UDP Load Balancing - // and Network Load Balancing, omit port. + // backend. The default value is 80. For internal passthrough Network + // Load Balancers and external passthrough Network Load Balancers, omit + // port. Port int64 `json:"port,omitempty"` // PortName: A named port on a backend instance group representing the // port for communication to the backend VMs in that group. The named // port must be defined on each backend instance group // (https://cloud.google.com/load-balancing/docs/backend-service#named_ports). - // This parameter has no meaning if the backends are NEGs. For Internal - // TCP/UDP Load Balancing and Network Load Balancing, omit port_name. + // This parameter has no meaning if the backends are NEGs. For internal + // passthrough Network Load Balancers and external passthrough Network + // Load Balancers, omit port_name. 
PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with @@ -6400,18 +6535,19 @@ type BackendServiceConnectionTrackingPolicy struct { // "NEVER_PERSIST" ConnectionPersistenceOnUnhealthyBackends string `json:"connectionPersistenceOnUnhealthyBackends,omitempty"` - // EnableStrongAffinity: Enable Strong Session Affinity for Network Load - // Balancing. This option is not available publicly. + // EnableStrongAffinity: Enable Strong Session Affinity for external + // passthrough Network Load Balancers. This option is not available + // publicly. EnableStrongAffinity bool `json:"enableStrongAffinity,omitempty"` // IdleTimeoutSec: Specifies how long to keep a Connection Tracking - // entry while there is no matching traffic (in seconds). For Internal - // TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the - // maximum is 16 hours. - It can be set only if Connection Tracking is - // less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, - // CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For - // Network Load Balancer the default is 60 seconds. This option is not - // available publicly. + // entry while there is no matching traffic (in seconds). For internal + // passthrough Network Load Balancers: - The minimum (default) is 10 + // minutes and the maximum is 16 hours. - It can be set only if + // Connection Tracking is less than 5-tuple (i.e. Session Affinity is + // CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking + // Mode is PER_SESSION). For external passthrough Network Load Balancers + // the default is 60 seconds. This option is not available publicly. IdleTimeoutSec int64 `json:"idleTimeoutSec,omitempty"` // TrackingMode: Specifies the key used for connection tracking. There @@ -6457,9 +6593,9 @@ func (s *BackendServiceConnectionTrackingPolicy) MarshalJSON() ([]byte, error) { } // BackendServiceFailoverPolicy: For load balancers that have -// configurable failover: Internal TCP/UDP Load Balancing +// configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) -// and external TCP/UDP Load Balancing +// and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // On failover or failback, this field indicates whether connection // draining will be honored. Google Cloud has a fixed connection @@ -6478,9 +6614,9 @@ type BackendServiceFailoverPolicy struct { // unhealthy.If set to false, connections are distributed among all // primary VMs when all primary and all backup backend VMs are // unhealthy. For load balancers that have configurable failover: - // Internal TCP/UDP Load Balancing + // Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // The default is false. DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` @@ -9340,6 +9476,10 @@ type Disk struct { // provide a key to use the disk later. DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // EnableConfidentialCompute: Whether this disk is using confidential + // compute mode. 
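// Editorial sketch, not part of the vendored diff: the connection-tracking and
// failover fields documented above apply to backend services used by internal
// passthrough Network Load Balancers. Resource names and the health-check URL
// are placeholders. Imports assumed: "context", compute "google.golang.org/api/compute/v1".
func exampleInternalBackendService(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	bs := &compute.BackendService{
		Name:                "example-internal-bs",
		LoadBalancingScheme: "INTERNAL", // internal passthrough Network Load Balancer
		Protocol:            "TCP",
		SessionAffinity:     "CLIENT_IP",
		HealthChecks:        []string{"regions/us-central1/healthChecks/example-hc"}, // placeholder
		ConnectionTrackingPolicy: &compute.BackendServiceConnectionTrackingPolicy{
			TrackingMode:   "PER_SESSION",
			IdleTimeoutSec: 600, // 10 minutes, the documented minimum/default for internal passthrough NLBs
		},
		FailoverPolicy: &compute.BackendServiceFailoverPolicy{
			DropTrafficIfUnhealthy: true,
		},
	}
	// Internal passthrough backend services are regional resources.
	return svc.RegionBackendServices.Insert("my-project", "us-central1", bs).Context(ctx).Do()
}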
+ EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating // system. Applicable only for bootable images. Read Enabling guest // operating system features to see a list of available options. @@ -13317,10 +13457,10 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // * Regional // (https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) // A forwarding rule and its corresponding IP address represent the -// frontend configuration of a Google Cloud Platform load balancer. -// Forwarding rules can also reference target instances and Cloud VPN -// Classic gateways (targetVpnGateway). For more information, read -// Forwarding rule concepts and Using protocol forwarding. +// frontend configuration of a Google Cloud load balancer. Forwarding +// rules can also reference target instances and Cloud VPN Classic +// gateways (targetVpnGateway). For more information, read Forwarding +// rule concepts and Using protocol forwarding. type ForwardingRule struct { // IPAddress: IP address for which this forwarding rule accepts traffic. // When a client sends traffic to this IP address, the forwarding rule @@ -13384,8 +13524,9 @@ type ForwardingRule struct { // AllowGlobalAccess: This field is used along with the backend_service // field for internal load balancing or with the target field for // internal TargetInstance. If set to true, clients can access the - // Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load - // Balancer from all regions. If false, only allows access from the + // internal passthrough Network Load Balancers, the regional internal + // Application Load Balancer, and the regional internal proxy Network + // Load Balancer from all regions. If false, only allows access from the // local region the load balancer is located at. Note that for // INTERNAL_MANAGED forwarding rules, this field cannot be changed after // the forwarding rule is created. @@ -13396,16 +13537,16 @@ type ForwardingRule struct { AllowPscGlobalAccess bool `json:"allowPscGlobalAccess,omitempty"` // BackendService: Identifies the backend service to which the - // forwarding rule sends traffic. Required for Internal TCP/UDP Load - // Balancing and Network Load Balancing; must be omitted for all other + // forwarding rule sends traffic. Required for internal and external + // passthrough Network Load Balancers; must be omitted for all other // load balancer types. BackendService string `json:"backendService,omitempty"` // BaseForwardingRule: [Output Only] The URL for the corresponding base - // Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule + // forwarding rule. By base forwarding rule, we mean the forwarding rule // that has the same IP address, protocol, and port settings with the - // current Forwarding Rule, but without sourceIPRanges specified. Always - // empty if the current Forwarding Rule does not have sourceIPRanges + // current forwarding rule, but without sourceIPRanges specified. Always + // empty if the current forwarding rule does not have sourceIPRanges // specified. BaseForwardingRule string `json:"baseForwardingRule,omitempty"` @@ -13448,7 +13589,7 @@ type ForwardingRule struct { IsMirroringCollector bool `json:"isMirroringCollector,omitempty"` // Kind: [Output Only] Type of the resource. Always - // compute#forwardingRule for Forwarding Rule resources. + // compute#forwardingRule for forwarding rule resources. 
Kind string `json:"kind,omitempty"` // LabelFingerprint: A fingerprint for the labels being applied to this @@ -13511,10 +13652,10 @@ type ForwardingRule struct { Name string `json:"name,omitempty"` // Network: This field is not used for global external load balancing. - // For Internal TCP/UDP Load Balancing, this field identifies the - // network that the load balanced IP should belong to for this - // Forwarding Rule. If the subnetwork is specified, the network of the - // subnetwork will be used. If neither subnetwork nor this field is + // For internal passthrough Network Load Balancers, this field + // identifies the network that the load balanced IP should belong to for + // this forwarding rule. If the subnetwork is specified, the network of + // the subnetwork will be used. If neither subnetwork nor this field is // specified, the default network will be used. For Private Service // Connect forwarding rules that forward traffic to Google APIs, a // network must be provided. @@ -13581,7 +13722,7 @@ type ForwardingRule struct { Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC - // Forwarding Rule. + // forwarding rule. PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` // Possible values: @@ -13611,7 +13752,7 @@ type ForwardingRule struct { ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"` // ServiceLabel: An optional prefix to the service name for this - // Forwarding Rule. If specified, the prefix is the first label of the + // forwarding rule. If specified, the prefix is the first label of the // fully qualified service name. The label must be 1-63 characters long, // and comply with RFC1035. Specifically, the label must be 1-63 // characters long and match the regular expression @@ -13622,25 +13763,26 @@ type ForwardingRule struct { ServiceLabel string `json:"serviceLabel,omitempty"` // ServiceName: [Output Only] The internal fully qualified service name - // for this Forwarding Rule. This field is only used for internal load + // for this forwarding rule. This field is only used for internal load // balancing. ServiceName string `json:"serviceName,omitempty"` - // SourceIpRanges: If not empty, this Forwarding Rule will only forward + // SourceIpRanges: If not empty, this forwarding rule will only forward // the traffic when the source IP address matches one of the IP - // addresses or CIDR ranges set here. Note that a Forwarding Rule can + // addresses or CIDR ranges set here. Note that a forwarding rule can // only have up to 64 source IP ranges, and this field can only be used - // with a regional Forwarding Rule whose scheme is EXTERNAL. Each + // with a regional forwarding rule whose scheme is EXTERNAL. Each // source_ip_range entry should be either an IP address (for example, // 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). SourceIpRanges []string `json:"sourceIpRanges,omitempty"` // Subnetwork: This field identifies the subnetwork that the load - // balanced IP should belong to for this Forwarding Rule, used in - // internal load balancing and network load balancing with IPv6. If the - // network specified is in auto subnet mode, this field is optional. - // However, a subnetwork must be specified if the network is in custom - // subnet mode or when creating external forwarding rule with IPv6. 
+ // balanced IP should belong to for this forwarding rule, used with + // internal load balancers and external passthrough Network Load + // Balancers with IPv6. If the network specified is in auto subnet mode, + // this field is optional. However, a subnetwork must be specified if + // the network is in custom subnet mode or when creating external + // forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched @@ -14101,9 +14243,9 @@ func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { } // ForwardingRuleServiceDirectoryRegistration: Describes the -// auto-registration of the Forwarding Rule to Service Directory. The +// auto-registration of the forwarding rule to Service Directory. The // region and project of the Service Directory resource generated from -// this registration will be the same as this Forwarding Rule. +// this registration will be the same as this forwarding rule. type ForwardingRuleServiceDirectoryRegistration struct { // Namespace: Service Directory namespace to register the forwarding // rule under. @@ -14115,8 +14257,8 @@ type ForwardingRuleServiceDirectoryRegistration struct { // ServiceDirectoryRegion: [Optional] Service Directory region to // register this global forwarding rule under. Default to "us-central1". - // Only used for PSC for Google APIs. All PSC for Google APIs Forwarding - // Rules on the same network should use the same Service Directory + // Only used for PSC for Google APIs. All PSC for Google APIs forwarding + // rules on the same network should use the same Service Directory // region. ServiceDirectoryRegion string `json:"serviceDirectoryRegion,omitempty"` @@ -14333,7 +14475,7 @@ type GRPCHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -14343,7 +14485,7 @@ type GRPCHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -14773,7 +14915,7 @@ type HTTP2HealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. 
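// Editorial sketch, not part of the vendored diff: a forwarding rule for an
// internal passthrough Network Load Balancer, following the revised field docs
// above (backendService required for passthrough schemes, subnetwork needed
// unless the network is in auto subnet mode). All names/URLs are placeholders.
// Imports assumed: "context", compute "google.golang.org/api/compute/v1".
func exampleInternalForwardingRule(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	fr := &compute.ForwardingRule{
		Name:                "example-ilb-fr",
		LoadBalancingScheme: "INTERNAL",
		IPProtocol:          "TCP",
		Ports:               []string{"80"},
		// Required for internal and external passthrough Network Load Balancers.
		BackendService: "regions/us-central1/backendServices/example-internal-bs",
		Network:        "global/networks/default",
		Subnetwork:     "regions/us-central1/subnetworks/default",
		// Lets clients in any region reach this internal load balancer.
		AllowGlobalAccess: true,
	}
	return svc.ForwardingRules.Insert("my-project", "us-central1", fr).Context(ctx).Do()
}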
The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -14783,7 +14925,7 @@ type HTTP2HealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -14867,7 +15009,7 @@ type HTTPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Also supported in // legacy HTTP health checks for target pools. The health check supports // all backends supported by the backend service provided the backend @@ -14961,7 +15103,7 @@ type HTTPSHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -14971,7 +15113,7 @@ type HTTPSHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -15041,20 +15183,14 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // (/compute/docs/reference/rest/v1/regionHealthChecks) * Global // (/compute/docs/reference/rest/v1/healthChecks) These health check // resources can be used for load balancing and for autohealing VMs in a -// managed instance group (MIG). 
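// Editorial sketch, not part of the vendored diff: the USE_FIXED_PORT /
// USE_SERVING_PORT wording above controls how the port is chosen for health
// checking. A TCP health check that follows each backend's serving port might
// look like this; the name is a placeholder.
// Imports assumed: compute "google.golang.org/api/compute/v1".
func exampleHealthCheck() *compute.HealthCheck {
	return &compute.HealthCheck{
		Name:             "example-tcp-hc",
		Type:             "TCP",
		CheckIntervalSec: 5,
		TimeoutSec:       5,
		TcpHealthCheck: &compute.TCPHealthCheck{
			// USE_SERVING_PORT is only supported by backend services for proxy
			// load balancers; passthrough load balancers would instead use
			// USE_FIXED_PORT with Port set.
			PortSpecification: "USE_SERVING_PORT",
		},
	}
}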
**Load balancing** The following load -// balancer can use either regional or global health check: * Internal -// TCP/UDP load balancer The following load balancers require regional -// health check: * Internal HTTP(S) load balancer * Backend -// service-based network load balancer Traffic Director and the -// following load balancers require global health check: * External -// HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load -// balancer The following load balancer require legacy HTTP health -// checks (/compute/docs/reference/rest/v1/httpHealthChecks): * Target -// pool-based network load balancer **Autohealing in MIGs** The health -// checks that you use for autohealing VMs in a MIG can be either -// regional or global. For more information, see Set up an application -// health check and autohealing. For more information, see Health checks -// overview. +// managed instance group (MIG). **Load balancing** Health check +// requirements vary depending on the type of load balancer. For details +// about the type of health check supported for each load balancer and +// corresponding backend type, see Health checks overview: Load balancer +// guide. **Autohealing in MIGs** The health checks that you use for +// autohealing VMs in a MIG can be either regional or global. For more +// information, see Set up an application health check and autohealing. +// For more information, see Health checks overview. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -17753,6 +17889,11 @@ type Image struct { // (in GB). DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // EnableConfidentialCompute: Whether this image is created from a + // confidential compute mode disk. [Output Only]: This field is not set + // by user, but from source disk. + EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + // Family: The name of the image family to which this image belongs. The // image family name can be from a publicly managed image family // provided by Compute Engine, or from a custom image family you create. @@ -18437,6 +18578,9 @@ type Instance struct { // corresponding input only field. ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: [Output Only] Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -39302,6 +39446,9 @@ type Quota struct { // "PREEMPTIBLE_NVIDIA_T4_GPUS" // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" // "PREEMPTIBLE_NVIDIA_V100_GPUS" + // "PREEMPTIBLE_TPU_LITE_DEVICE_V5" + // "PREEMPTIBLE_TPU_LITE_PODSLICE_V5" + // "PREEMPTIBLE_TPU_PODSLICE_V4" // "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK" // "PSC_INTERNAL_LB_FORWARDING_RULES" // "PUBLIC_ADVERTISED_PREFIXES" @@ -39340,6 +39487,9 @@ type Quota struct { // "TARGET_SSL_PROXIES" // "TARGET_TCP_PROXIES" // "TARGET_VPN_GATEWAYS" + // "TPU_LITE_DEVICE_V5" + // "TPU_LITE_PODSLICE_V5" + // "TPU_PODSLICE_V4" // "URL_MAPS" // "VPN_GATEWAYS" // "VPN_TUNNELS" @@ -41876,6 +42026,10 @@ func (s *RequestMirrorPolicy) MarshalJSON() ([]byte, error) { // that capacity is held in a specific zone even if the reserved VMs are // not running. For more information, read Reserving zonal resources. type Reservation struct { + // AggregateReservation: Reservation for aggregated resources, providing + // shape flexibility. 
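// Editorial sketch, not part of the vendored diff: wiring the new
// aggregateReservation field to the AllocationAggregateReservation types added
// earlier in this file. The ReservedResources/VmFamily field names and the
// VM family enum value are assumed from the generated struct (not shown in
// this hunk); the accelerator type URL is a placeholder.
// Imports assumed: compute "google.golang.org/api/compute/v1".
func exampleAggregateReservation() *compute.Reservation {
	return &compute.Reservation{
		Name: "example-tpu-reservation",
		Zone: "us-central2-b",
		AggregateReservation: &compute.AllocationAggregateReservation{
			VmFamily: "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P", // assumed enum value
			ReservedResources: []*compute.AllocationAggregateReservationReservedResourceInfo{
				{
					Accelerator: &compute.AllocationAggregateReservationReservedResourceInfoAccelerator{
						AcceleratorCount: 4,
						AcceleratorType:  "projects/my-project/zones/us-central2-b/acceleratorTypes/ct4l",
					},
				},
			},
		},
	}
}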
+ AggregateReservation *AllocationAggregateReservation `json:"aggregateReservation,omitempty"` + // Commitment: [Output Only] Full or partial URL to a parent commitment. // This field displays for reservations that are tied to a commitment. Commitment string `json:"commitment,omitempty"` @@ -41956,20 +42110,22 @@ type Reservation struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Commitment") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AggregateReservation") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Commitment") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AggregateReservation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -46270,7 +46426,7 @@ type SSLHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -46280,7 +46436,7 @@ type SSLHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -47980,6 +48136,10 @@ type SecurityPolicyRuleMatcher struct { // Security Policies. 
Expr *Expr `json:"expr,omitempty"` + // ExprOptions: The configuration options available when specifying a + // user defined CEVAL expression (i.e., 'expr'). + ExprOptions *SecurityPolicyRuleMatcherExprOptions `json:"exprOptions,omitempty"` + // VersionedExpr: Preconfigured versioned expression. If this field is // specified, config must also be specified. Available preconfigured // expressions along with their requirements are: SRC_IPS_V1 - must @@ -48041,6 +48201,73 @@ func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyRuleMatcherExprOptions struct { + // RecaptchaOptions: reCAPTCHA configuration options to be applied for + // the rule. If the rule does not evaluate reCAPTCHA tokens, this field + // will have no effect. + RecaptchaOptions *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions `json:"recaptchaOptions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RecaptchaOptions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RecaptchaOptions") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcherExprOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct { + // ActionTokenSiteKeys: A list of site keys to be used during the + // validation of reCAPTCHA action-tokens. The provided site keys need to + // be created from reCAPTCHA API under the same project where the + // security policy is created. + ActionTokenSiteKeys []string `json:"actionTokenSiteKeys,omitempty"` + + // SessionTokenSiteKeys: A list of site keys to be used during the + // validation of reCAPTCHA session-tokens. The provided site keys need + // to be created from reCAPTCHA API under the same project where the + // security policy is created. + SessionTokenSiteKeys []string `json:"sessionTokenSiteKeys,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActionTokenSiteKeys") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ActionTokenSiteKeys") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SecurityPolicyRuleNetworkMatcher: Represents a match condition that // incoming network traffic is evaluated against. type SecurityPolicyRuleNetworkMatcher struct { @@ -48296,7 +48523,13 @@ type SecurityPolicyRuleRateLimitOptions struct { // Server name indication in the TLS session of the HTTPS request. The // key value is truncated to the first 128 bytes. The key type defaults // to ALL on a HTTP session. - REGION_CODE: The country/region from - // which the request originates. + // which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL + // fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If + // not available, the key type defaults to ALL. - USER_IP: The IP + // address of the originating client, which is resolved based on + // "userIpRequestHeaders" configured with the security policy. If there + // is no "userIpRequestHeaders" configuration or an IP address cannot be + // resolved from it, the key type defaults to IP. // // Possible values: // "ALL" @@ -48306,6 +48539,8 @@ type SecurityPolicyRuleRateLimitOptions struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` @@ -48392,7 +48627,14 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // bytes. - SNI: Server name indication in the TLS session of the HTTPS // request. The key value is truncated to the first 128 bytes. The key // type defaults to ALL on a HTTP session. - REGION_CODE: The - // country/region from which the request originates. + // country/region from which the request originates. - + // TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects + // using HTTPS, HTTP/2 or HTTP/3. If not available, the key type + // defaults to ALL. - USER_IP: The IP address of the originating client, + // which is resolved based on "userIpRequestHeaders" configured with the + // security policy. If there is no "userIpRequestHeaders" configuration + // or an IP address cannot be resolved from it, the key type defaults to + // IP. // // Possible values: // "ALL" @@ -48402,6 +48644,8 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` @@ -49884,6 +50128,11 @@ type Snapshot struct { // snapshot to a disk. DownloadBytes int64 `json:"downloadBytes,omitempty,string"` + // EnableConfidentialCompute: Whether this snapshot is created from a + // confidential compute mode disk. [Output Only]: This field is not set + // by user, but from source disk. + EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + // GuestOsFeatures: [Output Only] A list of features to enable on the // guest operating system. Applicable only for bootable images. 
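// Editorial sketch, not part of the vendored diff: combining the new
// exprOptions reCAPTCHA settings with the new USER_IP rate-limit key added in
// this revision. The CEL expression, site key, and thresholds are illustrative only.
// Imports assumed: compute "google.golang.org/api/compute/v1".
func exampleThrottleRule() *compute.SecurityPolicyRule {
	return &compute.SecurityPolicyRule{
		Priority: 1000,
		Action:   "throttle",
		Match: &compute.SecurityPolicyRuleMatcher{
			Expr: &compute.Expr{Expression: "token.recaptcha_action.score < 0.3"}, // illustrative CEL
			ExprOptions: &compute.SecurityPolicyRuleMatcherExprOptions{
				RecaptchaOptions: &compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions{
					ActionTokenSiteKeys: []string{"example-site-key"}, // placeholder
				},
			},
		},
		RateLimitOptions: &compute.SecurityPolicyRuleRateLimitOptions{
			// USER_IP resolves the client IP from userIpRequestHeaders, falling back to IP.
			EnforceOnKey:  "USER_IP",
			ConformAction: "allow",
			ExceedAction:  "deny(429)",
			RateLimitThreshold: &compute.SecurityPolicyRuleRateLimitOptionsThreshold{
				Count:       100,
				IntervalSec: 60,
			},
		},
	}
}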
Read // Enabling guest operating system features to see a list of available @@ -51872,9 +52121,9 @@ func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { } // SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. +// control SSL features, such as versions and cipher suites, that are +// offered by Application Load Balancers and proxy Network Load +// Balancers. For more information, read SSL policies overview. type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -53362,7 +53611,7 @@ type TCPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -53372,7 +53621,7 @@ type TCPHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -55767,10 +56016,10 @@ func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { } // TargetPool: Represents a Target Pool resource. Target pools are used -// for network TCP/UDP load balancing. A target pool references member -// instances, an associated legacy HttpHealthCheck resource, and, -// optionally, a backup target pool. For more information, read Using -// target pools. +// with external passthrough Network Load Balancers. A target pool +// references member instances, an associated legacy HttpHealthCheck +// resource, and, optionally, a backup target pool. For more +// information, read Using target pools. type TargetPool struct { // BackupPool: The server-defined URL for the resource. This field is // applicable only when the containing target pool is serving a @@ -56786,10 +57035,10 @@ func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error } // TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL -// proxy is a component of a SSL Proxy load balancer. Global forwarding -// rules reference a target SSL proxy, and the target proxy then -// references an external backend service. For more information, read -// Using Target Proxies. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target SSL proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. 
type TargetSslProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field @@ -57303,10 +57552,10 @@ func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { } // TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP -// proxy is a component of a TCP Proxy load balancer. Global forwarding -// rules reference target TCP proxy, and the target proxy then -// references an external backend service. For more information, read -// TCP Proxy Load Balancing overview. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target TCP proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -97528,8 +97777,8 @@ type InstanceGroupManagersListManagedInstancesCall struct { // instance, the currentAction is CREATING. If a previous action failed, // the list displays the errors for that failed action. The orderBy // query parameter is not supported. The `pageToken` query parameter is -// supported only in the alpha and beta API and only if the group's -// `listManagedInstancesResults` field is set to `PAGINATED`. +// supported only if the group's `listManagedInstancesResults` field is +// set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -97715,7 +97964,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", @@ -110699,9 +110948,11 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. 
If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -110822,7 +111073,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -110897,9 +111148,11 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -111020,7 +111273,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -154556,9 +154809,8 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its // instances. The orderBy query parameter is not supported. The -// `pageToken` query parameter is supported only in the alpha and beta -// API and only if the group's `listManagedInstancesResults` field is -// set to `PAGINATED`. +// `pageToken` query parameter is supported only if the group's +// `listManagedInstancesResults` field is set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -154743,7 +154995,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. 
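// Editorial sketch, not part of the vendored diff: per the revised description
// above, discardLocalSsd should be set explicitly when an instance has Local
// SSD disks attached. Project, zone, and instance names are placeholders.
// Imports assumed: "context", compute "google.golang.org/api/compute/v1".
func exampleSuspend(ctx context.Context, svc *compute.Service) (*compute.Operation, error) {
	return svc.Instances.Suspend("my-project", "us-central1-a", "my-instance").
		DiscardLocalSsd(false). // keep Local SSD data across the suspend
		Context(ctx).
		Do()
}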
The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -173203,6 +173455,298 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa } +// method id "compute.regionZones.list": + +type RegionZonesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of Zone resources under the specific region +// available to the specified project. +// +// - project: Project ID for this request. +// - region: Region for this request. +func (r *RegionZonesService) List(project string, region string) *RegionZonesListCall { + c := &RegionZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. +func (c *RegionZonesListCall) Filter(filter string) *RegionZonesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionZonesListCall) MaxResults(maxResults int64) *RegionZonesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionZonesListCall) OrderBy(orderBy string) *RegionZonesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionZonesListCall) PageToken(pageToken string) *RegionZonesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionZonesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionZonesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionZonesListCall) Fields(s ...googleapi.Field) *RegionZonesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionZonesListCall) IfNoneMatch(entityTag string) *RegionZonesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionZonesListCall) Context(ctx context.Context) *RegionZonesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionZonesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionZonesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/zones") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionZones.list" call. +// Exactly one of *ZoneList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *ZoneList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ZoneList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Zone resources under the specific region available to the specified project.", + // "flatPath": "projects/{project}/regions/{region}/zones", + // "httpMethod": "GET", + // "id": "compute.regionZones.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/zones", + // "response": { + // "$ref": "ZoneList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionZonesListCall) Pages(ctx context.Context, f func(*ZoneList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regions.get": type RegionsGetCall struct { @@ -201589,8 +202133,8 @@ type TargetSslProxiesSetSslPolicyCall struct { // SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy // specifies the server-side support for SSL features. This affects -// connections between clients and the SSL proxy load balancer. They do -// not affect the connection between the load balancer and the backends. +// connections between clients and the load balancer. They do not affect +// the connection between the load balancer and the backends. // // - project: Project ID for this request. // - targetSslProxy: Name of the TargetSslProxy resource whose SSL @@ -201712,7 +202256,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. 
They do not affect the connection between the load balancer and the backends.", // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslPolicy", diff --git a/vendor/google.golang.org/api/container/v1/container-api.json b/vendor/google.golang.org/api/container/v1/container-api.json index b8b8df51cd..5d3d1feeb8 100644 --- a/vendor/google.golang.org/api/container/v1/container-api.json +++ b/vendor/google.golang.org/api/container/v1/container-api.json @@ -2540,7 +2540,7 @@ } } }, - "revision": "20231030", + "revision": "20231122", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2687,6 +2687,10 @@ "description": "Expose flow metrics on nodes", "type": "boolean" }, + "enableRelay": { + "description": "Enable Relay component", + "type": "boolean" + }, "relayMode": { "description": "Method used to make Relay available", "enum": [ @@ -6318,12 +6322,14 @@ "enum": [ "VULNERABILITY_MODE_UNSPECIFIED", "VULNERABILITY_DISABLED", - "VULNERABILITY_BASIC" + "VULNERABILITY_BASIC", + "VULNERABILITY_ENTERPRISE" ], "enumDescriptions": [ "Default value not specified.", "Disables vulnerability scanning on the cluster.", - "Applies basic vulnerability scanning on the cluster." + "Applies basic vulnerability scanning on the cluster.", + "Applies the Security Posture's vulnerability on cluster Enterprise level features." ], "type": "string" } diff --git a/vendor/google.golang.org/api/container/v1/container-gen.go b/vendor/google.golang.org/api/container/v1/container-gen.go index 6d60e3d704..e29f8e4664 100644 --- a/vendor/google.golang.org/api/container/v1/container-gen.go +++ b/vendor/google.golang.org/api/container/v1/container-gen.go @@ -535,6 +535,9 @@ type AdvancedDatapathObservabilityConfig struct { // EnableMetrics: Expose flow metrics on nodes EnableMetrics bool `json:"enableMetrics,omitempty"` + // EnableRelay: Enable Relay component + EnableRelay bool `json:"enableRelay,omitempty"` + // RelayMode: Method used to make Relay available // // Possible values: @@ -6561,6 +6564,8 @@ type SecurityPostureConfig struct { // cluster. // "VULNERABILITY_BASIC" - Applies basic vulnerability scanning on the // cluster. + // "VULNERABILITY_ENTERPRISE" - Applies the Security Posture's + // vulnerability on cluster Enterprise level features. VulnerabilityMode string `json:"vulnerabilityMode,omitempty"` // ForceSendFields is a list of field names (e.g. "Mode") to diff --git a/vendor/google.golang.org/api/dns/v1/dns-api.json b/vendor/google.golang.org/api/dns/v1/dns-api.json index 2fb9463fb3..8b4d553633 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-api.json +++ b/vendor/google.golang.org/api/dns/v1/dns-api.json @@ -1824,7 +1824,7 @@ } } }, - "revision": "20230831", + "revision": "20231130", "rootUrl": "https://dns.googleapis.com/", "schemas": { "Change": { @@ -3025,6 +3025,10 @@ "geo": { "$ref": "RRSetRoutingPolicyGeoPolicy" }, + "healthCheck": { + "description": "The selfLink attribute of the HealthCheck resource to use for this RRSetRoutingPolicy. https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks", + "type": "string" + }, "kind": { "default": "dns#rRSetRoutingPolicy", "type": "string" @@ -3093,10 +3097,18 @@ "type": "object" }, "RRSetRoutingPolicyHealthCheckTargets": { - "description": "HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. 
Only the healthy endpoints will be included in the response.", + "description": "HealthCheckTargets describes endpoints to health-check when responding to Routing Policy queries. Only the healthy endpoints will be included in the response. Only one of internal_load_balancer and external_endpoints should be set.", "id": "RRSetRoutingPolicyHealthCheckTargets", "properties": { + "externalEndpoints": { + "description": "The Internet IP addresses to be health checked. The format matches the format of ResourceRecordSet.rrdata as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1)", + "items": { + "type": "string" + }, + "type": "array" + }, "internalLoadBalancers": { + "description": "Configuration for internal load balancers to be health checked.", "items": { "$ref": "RRSetRoutingPolicyLoadBalancerTarget" }, diff --git a/vendor/google.golang.org/api/dns/v1/dns-gen.go b/vendor/google.golang.org/api/dns/v1/dns-gen.go index 54ab672467..ec3a9358f4 100644 --- a/vendor/google.golang.org/api/dns/v1/dns-gen.go +++ b/vendor/google.golang.org/api/dns/v1/dns-gen.go @@ -2357,6 +2357,11 @@ func (s *Quota) MarshalJSON() ([]byte, error) { type RRSetRoutingPolicy struct { Geo *RRSetRoutingPolicyGeoPolicy `json:"geo,omitempty"` + // HealthCheck: The selfLink attribute of the HealthCheck resource to + // use for this RRSetRoutingPolicy. + // https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks + HealthCheck string `json:"healthCheck,omitempty"` + Kind string `json:"kind,omitempty"` PrimaryBackup *RRSetRoutingPolicyPrimaryBackupPolicy `json:"primaryBackup,omitempty"` @@ -2476,20 +2481,27 @@ func (s *RRSetRoutingPolicyGeoPolicyGeoPolicyItem) MarshalJSON() ([]byte, error) // RRSetRoutingPolicyHealthCheckTargets: HealthCheckTargets describes // endpoints to health-check when responding to Routing Policy queries. -// Only the healthy endpoints will be included in the response. +// Only the healthy endpoints will be included in the response. Only one +// of internal_load_balancer and external_endpoints should be set. type RRSetRoutingPolicyHealthCheckTargets struct { + // ExternalEndpoints: The Internet IP addresses to be health checked. + // The format matches the format of ResourceRecordSet.rrdata as defined + // in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) + ExternalEndpoints []string `json:"externalEndpoints,omitempty"` + + // InternalLoadBalancers: Configuration for internal load balancers to + // be health checked. InternalLoadBalancers []*RRSetRoutingPolicyLoadBalancerTarget `json:"internalLoadBalancers,omitempty"` - // ForceSendFields is a list of field names (e.g. - // "InternalLoadBalancers") to unconditionally include in API requests. - // By default, fields with empty or default values are omitted from API - // requests. However, any non-pointer, non-interface field appearing in - // ForceSendFields will be sent to the server regardless of whether the - // field is empty or not. This may be used to include empty fields in - // Patch requests. + // ForceSendFields is a list of field names (e.g. "ExternalEndpoints") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. 
"InternalLoadBalancers") to + // NullFields is a list of field names (e.g. "ExternalEndpoints") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the diff --git a/vendor/google.golang.org/api/iam/v1/iam-api.json b/vendor/google.golang.org/api/iam/v1/iam-api.json index 2d8361f330..610883ce06 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-api.json +++ b/vendor/google.golang.org/api/iam/v1/iam-api.json @@ -2850,7 +2850,7 @@ } } }, - "revision": "20231026", + "revision": "20231130", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AccessRestrictions": { @@ -4410,14 +4410,14 @@ "id": "WorkforcePoolProvider", "properties": { "attributeCondition": { - "description": "A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo` and `google.display_name` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` \"'admins' in google.groups\" ```", + "description": "A [Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. `google.profile_photo`, `google.display_name` and `google.posix_username` are not supported. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credentials will be accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ``` \"'admins' in google.groups\" ```", "type": "string" }, "attributeMapping": { "additionalProperties": { "type": "string" }, - "description": "Required. Maps attributes from the authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. This is a required field and the mapped subject cannot exceed 127 bytes. * `google.groups`: Groups the authenticating user belongs to. 
You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. * `google.display_name`: The name of the authenticated user. This is an optional field and the mapped display name cannot exceed 100 bytes. If not set, `google.subject` will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.profile_photo`: The URL that specifies the authenticated user's thumbnail photo. This is an optional field. When set, the image will be visible as the user's profile picture. If not set, a generic user icon will be displayed instead. This attribute cannot be referenced in IAM bindings. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where {custom_attribute} is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workforce pool to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/locations/global/workforcePools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language] (https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 4KB. For OIDC providers, you must supply a custom mapping that includes the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token: ``` {\"google.subject\": \"assertion.sub\"} ```", + "description": "Required. Maps attributes from the authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. This is a required field and the mapped subject cannot exceed 127 bytes. * `google.groups`: Groups the authenticating user belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. * `google.display_name`: The name of the authenticated user. This is an optional field and the mapped display name cannot exceed 100 bytes. If not set, `google.subject` will be displayed instead. This attribute cannot be referenced in IAM bindings. * `google.profile_photo`: The URL that specifies the authenticated user's thumbnail photo. This is an optional field. When set, the image will be visible as the user's profile picture. If not set, a generic user icon will be displayed instead. This attribute cannot be referenced in IAM bindings. 
* `google.posix_username`: The linux username used by OS login. This is an optional field and the mapped posix username cannot exceed 32 characters, The key must match the regex \"^a-zA-Z0-9._{0,31}$\". This attribute cannot be referenced in IAM bindings. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where {custom_attribute} is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workforce pool to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/locations/global/workforcePools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language] (https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 4KB. For OIDC providers, you must supply a custom mapping that includes the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token: ``` {\"google.subject\": \"assertion.sub\"} ```", "type": "object" }, "description": { diff --git a/vendor/google.golang.org/api/iam/v1/iam-gen.go b/vendor/google.golang.org/api/iam/v1/iam-gen.go index 8d7a992fd6..1052db9bc9 100644 --- a/vendor/google.golang.org/api/iam/v1/iam-gen.go +++ b/vendor/google.golang.org/api/iam/v1/iam-gen.go @@ -3487,14 +3487,14 @@ type WorkforcePoolProvider struct { // keywords may be referenced in the expressions: * `assertion`: JSON // representing the authentication credential issued by the provider. * // `google`: The Google attributes mapped from the assertion in the - // `attribute_mappings`. `google.profile_photo` and - // `google.display_name` are not supported. * `attribute`: The custom - // attributes mapped from the assertion in the `attribute_mappings`. The - // maximum length of the attribute condition expression is 4096 - // characters. If unspecified, all valid authentication credentials will - // be accepted. The following example shows how to only allow - // credentials with a mapped `google.groups` value of `admins`: ``` - // "'admins' in google.groups" ``` + // `attribute_mappings`. `google.profile_photo`, `google.display_name` + // and `google.posix_username` are not supported. * `attribute`: The + // custom attributes mapped from the assertion in the + // `attribute_mappings`. The maximum length of the attribute condition + // expression is 4096 characters. If unspecified, all valid + // authentication credentials will be accepted. 
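To make the newly documented `google.posix_username` attribute and the CEL `attributeCondition` concrete, a minimal sketch against the generated `iam/v1` types could look like the following. The claim names (`assertion.sub`, `assertion.preferred_username`) and the group condition are illustrative assumptions, not values prescribed by the API.

```go
package main

import (
	"encoding/json"
	"fmt"

	iam "google.golang.org/api/iam/v1"
)

func main() {
	provider := &iam.WorkforcePoolProvider{
		// Required for OIDC providers: map the IdP's `sub` claim to google.subject.
		// google.posix_username (new in this revision) feeds OS Login and is
		// limited to 32 characters; the source claim used here is an assumption.
		AttributeMapping: map[string]string{
			"google.subject":        "assertion.sub",
			"google.posix_username": "assertion.preferred_username",
		},
		// CEL condition from the description above: only accept credentials
		// whose mapped google.groups value contains "admins".
		AttributeCondition: "'admins' in google.groups",
	}

	b, _ := json.MarshalIndent(provider, "", "  ")
	fmt.Println(string(b))
}
```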
The following example + // shows how to only allow credentials with a mapped `google.groups` + // value of `admins`: ``` "'admins' in google.groups" ``` AttributeCondition string `json:"attributeCondition,omitempty"` // AttributeMapping: Required. Maps attributes from the authentication @@ -3515,15 +3515,18 @@ type WorkforcePoolProvider struct { // specifies the authenticated user's thumbnail photo. This is an // optional field. When set, the image will be visible as the user's // profile picture. If not set, a generic user icon will be displayed - // instead. This attribute cannot be referenced in IAM bindings. You can - // also provide custom attributes by specifying - // `attribute.{custom_attribute}`, where {custom_attribute} is the name - // of the custom attribute to be mapped. You can define a maximum of 50 - // custom attributes. The maximum length of a mapped attribute key is - // 100 characters, and the key may only contain the characters - // [a-z0-9_]. You can reference these attributes in IAM policies to - // define fine-grained access for a workforce pool to Google Cloud - // resources. For example: * `google.subject`: + // instead. This attribute cannot be referenced in IAM bindings. * + // `google.posix_username`: The linux username used by OS login. This is + // an optional field and the mapped posix username cannot exceed 32 + // characters, The key must match the regex "^a-zA-Z0-9._{0,31}$". This + // attribute cannot be referenced in IAM bindings. You can also provide + // custom attributes by specifying `attribute.{custom_attribute}`, where + // {custom_attribute} is the name of the custom attribute to be mapped. + // You can define a maximum of 50 custom attributes. The maximum length + // of a mapped attribute key is 100 characters, and the key may only + // contain the characters [a-z0-9_]. You can reference these attributes + // in IAM policies to define fine-grained access for a workforce pool to + // Google Cloud resources. For example: * `google.subject`: // `principal://iam.googleapis.com/locations/global/workforcePools/{pool} // /subject/{value}` * `google.groups`: // `principalSet://iam.googleapis.com/locations/global/workforcePools/{po diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index a130609e5d..104a911327 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.153.0" +const Version = "0.154.0" diff --git a/vendor/google.golang.org/api/networkservices/v1/networkservices-api.json b/vendor/google.golang.org/api/networkservices/v1/networkservices-api.json index 000cafbd18..e21600e7fb 100644 --- a/vendor/google.golang.org/api/networkservices/v1/networkservices-api.json +++ b/vendor/google.golang.org/api/networkservices/v1/networkservices-api.json @@ -2148,7 +2148,7 @@ } } }, - "revision": "20230926", + "revision": "20231129", "rootUrl": "https://networkservices.googleapis.com/", "schemas": { "AuditConfig": { @@ -2379,7 +2379,7 @@ "type": "object" }, "Gateway": { - "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. 
Routes have reference to to Gateways to dictate how requests should be routed by this Gateway.", + "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29", "id": "Gateway", "properties": { "addresses": { @@ -2410,6 +2410,20 @@ "description": "Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'.", "type": "string" }, + "ipVersion": { + "description": "Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4.", + "enum": [ + "IP_VERSION_UNSPECIFIED", + "IPV4", + "IPV6" + ], + "enumDescriptions": [ + "The type when IP version is not specified. Defaults to IPV4.", + "The type for IP version 4.", + "The type for IP version 6." + ], + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" diff --git a/vendor/google.golang.org/api/networkservices/v1/networkservices-gen.go b/vendor/google.golang.org/api/networkservices/v1/networkservices-gen.go index fedb6e1e21..6429d6ced9 100644 --- a/vendor/google.golang.org/api/networkservices/v1/networkservices-gen.go +++ b/vendor/google.golang.org/api/networkservices/v1/networkservices-gen.go @@ -792,7 +792,7 @@ func (s *Expr) MarshalJSON() ([]byte, error) { // a load balancer. It captures the ip:port over which the services are // exposed by the proxy, along with any policy configurations. Routes // have reference to to Gateways to dictate how requests should be -// routed by this Gateway. +// routed by this Gateway. Next id: 29 type Gateway struct { // Addresses: Optional. Zero or one IPv4 or IPv6 address on which the // Gateway will receive the traffic. When no address is provided, an IP @@ -822,6 +822,16 @@ type Gateway struct { // policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. GatewaySecurityPolicy string `json:"gatewaySecurityPolicy,omitempty"` + // IpVersion: Optional. The IP Version that will be used by this + // gateway. Valid options are IPV4 or IPV6. Default is IPV4. + // + // Possible values: + // "IP_VERSION_UNSPECIFIED" - The type when IP version is not + // specified. Defaults to IPV4. + // "IPV4" - The type for IP version 4. + // "IPV6" - The type for IP version 6. + IpVersion string `json:"ipVersion,omitempty"` + // Labels: Optional. Set of label tags associated with the Gateway // resource. Labels map[string]string `json:"labels,omitempty"` diff --git a/vendor/google.golang.org/api/networkservices/v1beta1/networkservices-api.json b/vendor/google.golang.org/api/networkservices/v1beta1/networkservices-api.json index 9ee7417e48..e7e3d8da9a 100644 --- a/vendor/google.golang.org/api/networkservices/v1beta1/networkservices-api.json +++ b/vendor/google.golang.org/api/networkservices/v1beta1/networkservices-api.json @@ -2483,7 +2483,7 @@ } } }, - "revision": "20231018", + "revision": "20231129", "rootUrl": "https://networkservices.googleapis.com/", "schemas": { "AuditConfig": { @@ -2716,7 +2716,7 @@ "type": "string" }, "service": { - "description": "Required. The reference to the service that runs the extension. 
Must be a reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices).", + "description": "Required. The reference to the service that runs the extension. Currently only Callout extensions are supported here. To configure a Callout extension, `service` must be a fully-qualified reference to a [backend service](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) in the format: `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/backendServices/{backendService}` or `https://www.googleapis.com/compute/v1/projects/{project}/global/backendServices/{backendService}`.", "type": "string" }, "supportedEvents": { @@ -2753,14 +2753,14 @@ "id": "ExtensionChainMatchCondition", "properties": { "celExpression": { - "description": "Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed.", + "description": "Required. A Common Expression Language (CEL) expression that is used to match requests for which the extension chain is executed. For more information, see [CEL matcher language reference](https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference).", "type": "string" } }, "type": "object" }, "Gateway": { - "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway.", + "description": "Gateway represents the configuration for a proxy, typically a load balancer. It captures the ip:port over which the services are exposed by the proxy, along with any policy configurations. Routes have reference to to Gateways to dictate how requests should be routed by this Gateway. Next id: 29", "id": "Gateway", "properties": { "addresses": { @@ -2791,6 +2791,20 @@ "description": "Optional. A fully-qualified GatewaySecurityPolicy URL reference. Defines how a server should apply security policy to inbound (VM to Proxy) initiated connections. For example: `projects/*/locations/*/gatewaySecurityPolicies/swg-policy`. This policy is specific to gateways of type 'SECURE_WEB_GATEWAY'.", "type": "string" }, + "ipVersion": { + "description": "Optional. The IP Version that will be used by this gateway. Valid options are IPV4 or IPV6. Default is IPV4.", + "enum": [ + "IP_VERSION_UNSPECIFIED", + "IPV4", + "IPV6" + ], + "enumDescriptions": [ + "The type when IP version is not specified. Defaults to IPV4.", + "The type for IP version 4.", + "The type for IP version 6." + ], + "type": "string" + }, "labels": { "additionalProperties": { "type": "string" diff --git a/vendor/google.golang.org/api/networkservices/v1beta1/networkservices-gen.go b/vendor/google.golang.org/api/networkservices/v1beta1/networkservices-gen.go index a91c2e9b79..6ab68f4595 100644 --- a/vendor/google.golang.org/api/networkservices/v1beta1/networkservices-gen.go +++ b/vendor/google.golang.org/api/networkservices/v1beta1/networkservices-gen.go @@ -772,8 +772,15 @@ type ExtensionChainExtension struct { Name string `json:"name,omitempty"` // Service: Required. The reference to the service that runs the - // extension. Must be a reference to a backend service - // (https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + // extension. Currently only Callout extensions are supported here. 
To + // configure a Callout extension, `service` must be a fully-qualified + // reference to a backend service + // (https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) + // in the format: + // `https://www.googleapis.com/compute/v1/projects/{project}/regions/{reg + // ion}/backendServices/{backendService}` or + // `https://www.googleapis.com/compute/v1/projects/{project}/global/backe + // ndServices/{backendService}`. Service string `json:"service,omitempty"` // SupportedEvents: Optional. A set of events during request or response @@ -825,7 +832,9 @@ func (s *ExtensionChainExtension) MarshalJSON() ([]byte, error) { type ExtensionChainMatchCondition struct { // CelExpression: Required. A Common Expression Language (CEL) // expression that is used to match requests for which the extension - // chain is executed. + // chain is executed. For more information, see CEL matcher language + // reference + // (https://cloud.google.com/service-extensions/docs/cel-matcher-language-reference). CelExpression string `json:"celExpression,omitempty"` // ForceSendFields is a list of field names (e.g. "CelExpression") to @@ -855,7 +864,7 @@ func (s *ExtensionChainMatchCondition) MarshalJSON() ([]byte, error) { // a load balancer. It captures the ip:port over which the services are // exposed by the proxy, along with any policy configurations. Routes // have reference to to Gateways to dictate how requests should be -// routed by this Gateway. +// routed by this Gateway. Next id: 29 type Gateway struct { // Addresses: Optional. Zero or one IPv4 or IPv6 address on which the // Gateway will receive the traffic. When no address is provided, an IP @@ -885,6 +894,16 @@ type Gateway struct { // policy is specific to gateways of type 'SECURE_WEB_GATEWAY'. GatewaySecurityPolicy string `json:"gatewaySecurityPolicy,omitempty"` + // IpVersion: Optional. The IP Version that will be used by this + // gateway. Valid options are IPV4 or IPV6. Default is IPV4. + // + // Possible values: + // "IP_VERSION_UNSPECIFIED" - The type when IP version is not + // specified. Defaults to IPV4. + // "IPV4" - The type for IP version 4. + // "IPV6" - The type for IP version 6. + IpVersion string `json:"ipVersion,omitempty"` + // Labels: Optional. Set of label tags associated with the Gateway // resource. 
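The `IpVersion` field added to both the v1 and v1beta1 `Gateway` structs is a plain string enum. A hypothetical sketch of populating it on the generated v1 type follows; the project, gateway name, and labels are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"

	networkservices "google.golang.org/api/networkservices/v1"
)

func main() {
	gw := &networkservices.Gateway{
		Name: "projects/example-project/locations/global/gateways/example-gateway",
		// New in this revision: "IPV4" (the default) or "IPV6".
		IpVersion: "IPV6",
		Labels:    map[string]string{"env": "dev"},
	}

	b, _ := json.MarshalIndent(gw, "", "  ")
	fmt.Println(string(b))
}
```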
Labels map[string]string `json:"labels,omitempty"` diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 6c89799d54..2c5bfb5b30 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,14 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"38383938373230313033363637363637353533\"", + "endpoints": [ + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", + "location": "me-central2" + } + ], + "etag": "\"3131373432363238303039393730353234383930\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -3799,7 +3806,7 @@ } } }, - "revision": "20231117", + "revision": "20231202", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index a07362ffdb..7e322a17c6 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -16,6 +16,7 @@ import ( "time" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" @@ -69,6 +70,9 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). + trans = addOpenTelemetryTransport(trans, settings) trans = addOCTransport(trans, settings) switch { case settings.NoAuth: @@ -203,6 +207,13 @@ func fallbackBaseTransport() *http.Transport { } } +func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { + if settings.TelemetryDisabled { + return trans + } + return otelhttp.NewTransport(trans) +} + func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { if settings.TelemetryDisabled { return trans diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 6d03f4d36e..0000000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md index ffc2985208..289693613c 100644 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md @@ -19,14 +19,12 @@ ## Running system tests -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. 
- Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. -Run tests with `goapp test`: +Run tests with `go test`: ``` -goapp test -v google.golang.org/appengine/... +go test -v google.golang.org/appengine/... ``` ## Contributor License Agreements diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md index 9fdbacd3c6..5ccddd9990 100644 --- a/vendor/google.golang.org/appengine/README.md +++ b/vendor/google.golang.org/appengine/README.md @@ -1,6 +1,6 @@ # Go App Engine packages -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) +[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml) This repository supports the Go runtime on *App Engine standard*. It provides APIs for interacting with App Engine services. @@ -51,7 +51,7 @@ code importing `appengine/datastore` will now need to import `google.golang.org/ Most App Engine services are available with exactly the same API. A few APIs were cleaned up, and there are some differences: -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* `appengine.Context` has been replaced with the `Context` type from `context`. * Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. * `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. * `appengine.Datacenter` now takes a `context.Context` argument. @@ -72,7 +72,7 @@ A few APIs were cleaned up, and there are some differences: * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. Use the standard `net` package instead. -## Key Encode/Decode compatibiltiy to help with datastore library migrations +## Key Encode/Decode compatibility to help with datastore library migrations Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 8c9697674f..35ba9c8967 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -9,10 +9,10 @@ package appengine // import "google.golang.org/appengine" import ( + "context" "net/http" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" ) @@ -35,18 +35,18 @@ import ( // // Main is designed so that the app's main package looks like this: // -// package main +// package main // -// import ( -// "google.golang.org/appengine" +// import ( +// "google.golang.org/appengine" // -// _ "myapp/package0" -// _ "myapp/package1" -// ) +// _ "myapp/package0" +// _ "myapp/package1" +// ) // -// func main() { -// appengine.Main() -// } +// func main() { +// appengine.Main() +// } // // The "myapp/packageX" packages are expected to register HTTP handlers // in their init functions. 
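The README changes above summarize the migration away from `golang.org/x/net/context`: handlers receive a standard `context.Context` from `appengine.NewContext`, logging goes through `google.golang.org/appengine/log`, and `main` only calls `appengine.Main()`. A minimal sketch of that pattern, with an illustrative handler:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func init() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// A plain context.Context; the old appengine.Context type is gone.
		ctx := appengine.NewContext(r)
		log.Infof(ctx, "serving %s", r.URL.Path)
		fmt.Fprintln(w, "hello")
	})
}

func main() {
	appengine.Main()
}
```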
@@ -54,6 +54,9 @@ func Main() { internal.Main() } +// Middleware wraps an http handler so that it can make GAE API calls +var Middleware func(http.Handler) http.Handler = internal.Middleware + // IsDevAppServer reports whether the App Engine app is running in the // development App Server. func IsDevAppServer() bool { diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go index f4b645aad3..6e1d041cd9 100644 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -2,19 +2,19 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package appengine import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" + "context" ) // BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". +// +// Deprecated: App Engine no longer has a special background context. +// Just use context.Background(). func BackgroundContext() context.Context { - return internal.BackgroundContext() + return context.Background() } diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go index b8dcf8f361..1202fc1a53 100644 --- a/vendor/google.golang.org/appengine/identity.go +++ b/vendor/google.golang.org/appengine/identity.go @@ -5,10 +5,9 @@ package appengine import ( + "context" "time" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/app_identity" modpb "google.golang.org/appengine/internal/modules" diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20a..0569f5dd43 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
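The newly exported `appengine.Middleware` hook above is aimed at apps that run their own `http.Server` instead of calling `appengine.Main()`, while the deprecated `BackgroundContext` now simply returns `context.Background()`. A hedged sketch of using the hook; the port and handler body are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/log"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Middleware injects the App Engine API plumbing into r.Context(),
		// so context-based packages such as appengine/log keep working.
		log.Infof(r.Context(), "request received")
		fmt.Fprintln(w, "ok")
	})

	// Wrap the mux so handlers can make GAE API calls without appengine.Main().
	if err := http.ListenAndServe(":8080", appengine.Middleware(mux)); err != nil {
		panic(err)
	}
}
```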
+//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. 
- <-flushed +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), } - }() + r = r.WithContext(withContext(r.Context(), c)) + c.req = r - http.DefaultServeMux.ServeHTTP(c, r) + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } + + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed + } + }) +} + +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. 
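The refactor above replaces the old `handleHTTP` entry point with two composable wrappers, `handleHTTPMiddleware` and `executeRequestSafelyMiddleware`, chained as outer(inner(next)). The shape is ordinary `net/http` middleware composition with an inner panic guard; a standard-library-only sketch of that shape follows, and it is not the package's actual implementation.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// recoverTo500 is the inner wrapper: it turns a panic in downstream
// handlers into a logged 500, mirroring executeRequestSafelyMiddleware.
func recoverTo500(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if x := recover(); x != nil {
				log.Printf("panic: %v", x)
				http.Error(w, "internal server error", http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}

// withRequestSetup is the outer wrapper: it normalizes request state before
// calling the next handler, loosely mirroring handleHTTPMiddleware's
// RemoteAddr patching (the real wrapper does much more).
func withRequestSetup(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.RemoteAddr == "" {
			r.RemoteAddr = "127.0.0.1:80"
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	// Compose as the package does: outer(inner(app)).
	http.ListenAndServe(":8080", withRequestSetup(recoverTo500(app)))
}
```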
-type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, + Host: apiURL.Host, } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. 
if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. 
n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e35..87c33c798e 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b7..5b95c13d92 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var 
callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e425..0f95aa91d5 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e3..5ad3548bf7 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e3..4201b6b585 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d80672635..18ddda3a42 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312fd..afd0ae84fd 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a3338..86a8caf06f 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae6538..2ae8ab9fa4 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go index 21860ca082..6f169be487 100644 --- a/vendor/google.golang.org/appengine/namespace.go +++ b/vendor/google.golang.org/appengine/namespace.go @@ -5,11 +5,10 @@ package appengine import ( + "context" "fmt" "regexp" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" ) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go index 05642a992a..fcf3ad0a58 100644 --- a/vendor/google.golang.org/appengine/timeout.go +++ b/vendor/google.golang.org/appengine/timeout.go @@ -4,7 +4,7 @@ package appengine -import "golang.org/x/net/context" +import "context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46e..0000000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... 
| sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045b..0000000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d90..6c0d72418d 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 second is used. func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_wrapper.go similarity index 57% rename from vendor/google.golang.org/grpc/balancer_conn_wrappers.go rename to vendor/google.golang.org/grpc/balancer_wrapper.go index a4411c22bf..b5e30cff02 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -32,21 +32,13 @@ import ( "google.golang.org/grpc/resolver" ) -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the // balancer.Balancer interface. The ClientConn is free to call these methods // concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. 
// // ccBalancerWrapper also implements the balancer.ClientConn interface and is // passed to the Balancer implementations. It invokes unexported methods on the @@ -57,87 +49,75 @@ const ( type ccBalancerWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer + // The following fields are only accessed within the serializer or during + // initialization. curBalancerName string + balancer *gracefulswitch.Balancer - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool } -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) return ccb } // updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. 
- ok := ccb.serializer.Schedule(func(_ context.Context) { - errCh <- ccb.balancer.UpdateClientConnState(*ccs) + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err }) if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") + return nil } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. - err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) - } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. - sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() + return <-errCh } +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } ccb.balancer.ResolverError(err) }) - ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -151,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. if strings.EqualFold(ccb.curBalancerName, name) { @@ -160,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) { } ccb.buildLoadBalancingPolicy(name) }) - ccb.mu.Unlock() } // buildLoadBalancingPolicy performs the following: @@ -187,115 +168,49 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. 
func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done() - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } + ccb.closed = true ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish before closing the balancer. - <-done - b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. -func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { return } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. 
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - + ccb.balancer.Close() + ccb.balancer = nil }) - ccb.mu.Unlock() - - <-done + ccb.serializerCancel() } -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") } + ccb.mu.Unlock() if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ac, err := ccb.cc.newAddrConn(addrs, opts) + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err @@ -316,10 +231,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -328,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - + ccb.mu.Unlock() // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - - ccb.cc.resolveNow(o) + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) } func (ccb *ccBalancerWrapper) Target() string { @@ -364,6 +289,20 @@ type acBalancerWrapper struct { producers map[balancer.ProducerBuilder]*refCountedProducer } +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. 
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + func (acbw *acBalancerWrapper) String() string { return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } @@ -377,20 +316,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { - ccb := acbw.ccb - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } // NewStream begins a streaming RPC on the addrConn. If the addrConn is not diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 429c389e47..e6f2625b68 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -33,9 +33,7 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" @@ -48,9 +46,9 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( @@ -119,23 +117,8 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), @@ -143,23 +126,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -177,21 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -205,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). 
To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -231,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. 
for { s := cc.GetState() @@ -320,8 +306,8 @@ func (cc *ClientConn) addTraceEvent(msg string) { type idler ClientConn -func (i *idler) EnterIdleMode() error { - return (*ClientConn)(i).enterIdleMode() +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() } func (i *idler) ExitIdleMode() error { @@ -329,117 +315,71 @@ func (i *idler) ExitIdleMode() error { } // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } -// enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. 
-func (cc *ClientConn) enterIdleMode() error { - cc.mu.Lock() - defer cc.mu.Unlock() - - if cc.conns == nil { - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - return nil - } - +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() // cc.conns == nil is a proxy for the ClientConn being closed. So, instead // of setting it to nil here, we recreate the map. This also means that we // don't have to do this when exiting idle mode. - conns := cc.conns cc.conns = make(map[*addrConn]struct{}) +} - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. - cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { + cc.mu.Lock() + + if cc.conns == nil { + cc.mu.Unlock() + return + } + + conns := cc.conns + + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle cc.addTraceEvent("entering idle mode") - go func() { - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() + cc.initIdleStateLocked() - return nil + cc.mu.Unlock() + + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -649,66 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idle.Manager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. 
mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - -func (s ccIdlenessState) String() string { - switch s { - case ccIdlenessStateActive: - return "active" - case ccIdlenessStateIdle: - return "idle" - case ccIdlenessStateExitingIdle: - return "exitingIdle" - default: - return "unknown" - } -} - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -748,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. 
- cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -804,11 +699,11 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } - internal.EnterIdleModeForTesting = func(cc *ClientConn) error { - return cc.enterIdleMode() + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() } internal.ExitIdleModeForTesting = func(cc *ClientConn) error { - return cc.exitIdleMode() + return cc.idlenessMgr.ExitIdleMode() } } @@ -824,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -872,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -894,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. -func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -910,14 +802,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) -} - // Makes a copy of the input addresses slice and clears out the balancer // attributes field. Addresses are passed during subconn creation and address // update operations. 
In both cases, we will clear the balancer attributes by @@ -932,10 +820,14 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, @@ -947,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -968,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -1174,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1216,12 +1103,12 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1253,40 +1140,32 @@ func (cc *ClientConn) Close() error { <-cc.csMgr.pubSub.Done() }() + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. 
- if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.Close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1307,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1345,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1849,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -2007,32 +1886,3 @@ func (cc *ClientConn) determineAuthority() error { channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. -func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b106182d..08476ad1fe 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. 
+// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd21a..5dafd34edf 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index cfc9fd85e8..ba24261804 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + 
internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -250,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -413,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -487,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -637,14 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, - recvBufferPool: nopBufferPool{}, - idleTimeout: 30 * time.Minute, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -705,11 +705,13 @@ func WithIdleTimeout(d time.Duration) DialOption { // options are used: WithStatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. 
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.recvBufferPool = bufferPool }) diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 4399c3df49..11f91668ac 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -36,6 +39,7 @@ import "sync" type Unbounded struct { c chan any closed bool + closing bool mu sync.Mutex backlog []any } @@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. +// is closed after all data is drained. func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 5395e77529..fc094f3441 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -31,6 +31,7 @@ import ( "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -58,6 +59,12 @@ func TurnOn() { } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. 
func IsOn() bool { return atomic.LoadInt32(&curState) == 1 diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3cf10ddfbd..685a3cb41b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,9 +36,6 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy. - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) // LeastRequestLB is set if we should support the least_request_experimental // LB policy, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1c1..29f234acb1 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". 
- XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 0000000000..7f7044e173 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 900917dbe6..f7f40a16ac 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -38,8 +37,6 @@ type CallbackSerializer struct { done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - cs.closedMu.Lock() - defer cs.closedMu.Unlock() - - if cs.closed { - return false - } - cs.callbacks.Put(f) - return true + return cs.callbacks.Put(f) == nil } func (cs *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) - defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. 
- case callback, ok := <-cs.callbacks.Get(): - if !ok { - return - } + case cb := <-cs.callbacks.Get(): cs.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing cs.done. - cs.closedMu.Lock() - cs.closed = true - backlog = cs.fetchPendingCallbacks() + // Close the buffer to prevent new callbacks from being added. cs.callbacks.Close() - cs.closedMu.Unlock() - for _, b := range backlog { - b(ctx) - } -} -func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-cs.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - cs.callbacks.Load() - default: - return backlog - } + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index 6c272476e5..fe49cb74c5 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -26,8 +26,6 @@ import ( "sync" "sync/atomic" "time" - - "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { // and exit from idle mode. type Enforcer interface { ExitIdleMode() error - EnterIdleMode() error + EnterIdleMode() } -// Manager defines the functionality required to track RPC activity on a -// channel. -type Manager interface { - OnCallBegin() error - OnCallEnd() - Close() -} - -type noopManager struct{} - -func (noopManager) OnCallBegin() error { return nil } -func (noopManager) OnCallEnd() {} -func (noopManager) Close() {} - -// manager implements the Manager interface. It uses atomic operations to -// synchronize access to shared state and a mutex to guarantee mutual exclusion -// in a critical section. -type manager struct { +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -69,8 +52,7 @@ type manager struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - logger grpclog.LoggerV2 + timeout time.Duration // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: @@ -88,57 +70,48 @@ type manager struct { timer *time.Timer } -// ManagerOptions is a collection of options used by -// NewManager. -type ManagerOptions struct { - Enforcer Enforcer - Timeout time.Duration - Logger grpclog.LoggerV2 -} - // NewManager creates a new idleness manager implementation for the -// given idle timeout. -func NewManager(opts ManagerOptions) Manager { - if opts.Timeout == 0 { - return noopManager{} +// given idle timeout. It begins in idle mode. 
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, } - - m := &manager{ - enforcer: opts.Enforcer, - timeout: int64(opts.Timeout), - logger: opts.Logger, - } - m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) - return m } -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (m *manager) resetIdleTimer(d time.Duration) { - m.idleMu.Lock() - defer m.idleMu.Unlock() - - if m.timer == nil { - // Only close sets timer to nil. We are done. +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { return } // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - m.timer.Reset(d) + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (m *manager) handleIdleTimeout() { +func (m *Manager) handleIdleTimeout() { if m.isClosed() { return } if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) return } @@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) return } - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the - // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - m.resetIdleTimer(time.Duration(m.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. if m.tryEnterIdleMode() { // Successfully entered idle mode. No timer needed until we exit idle. return @@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. 
Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *manager) tryEnterIdleMode() bool { +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + m.idleMu.Lock() defer m.idleMu.Unlock() if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we + // A very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := m.enforcer.EnterIdleMode(); err != nil { - m.logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() m.actuallyIdle = true return true } +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + // OnCallBegin is invoked at the start of every RPC. -func (m *manager) OnCallBegin() error { +func (m *Manager) OnCallBegin() error { if m.isClosed() { return nil } @@ -227,7 +200,7 @@ func (m *manager) OnCallBegin() error { // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := m.exitIdleMode(); err != nil { + if err := m.ExitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. atomic.AddInt32(&m.activeCallsCount, -1) @@ -238,28 +211,30 @@ func (m *manager) OnCallBegin() error { return nil } -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (m *manager) exitIdleMode() error { +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. 
+func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. m.idleMu.Lock() defer m.idleMu.Unlock() - if !m.actuallyIdle { - // This can happen in two scenarios: + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get // here. The first one to get the lock would get the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. // - // Either way, nothing to do here. + // + // In any case, there is nothing to do here. return nil } if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) + return fmt.Errorf("failed to exit idle mode: %w", err) } // Undo the idle entry process. This also respects any new RPC attempts. @@ -267,12 +242,12 @@ func (m *manager) exitIdleMode() error { m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + m.resetIdleTimerLocked(m.timeout) return nil } // OnCallEnd is invoked at the end of every RPC. -func (m *manager) OnCallEnd() { +func (m *Manager) OnCallEnd() { if m.isClosed() { return } @@ -287,15 +262,17 @@ func (m *manager) OnCallEnd() { atomic.AddInt32(&m.activeCallsCount, -1) } -func (m *manager) isClosed() bool { +func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } -func (m *manager) Close() { +func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) m.idleMu.Lock() - m.timer.Stop() - m.timer = nil + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 0d94c63e06..2549fe8e3b 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -73,6 +73,11 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports any // func(*grpc.Server, string) + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. @@ -177,10 +182,12 @@ var ( GRPCResolverSchemeExtraMetadata string = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. - EnterIdleModeForTesting any // func(*grpc.ClientConn) error + EnterIdleModeForTesting any // func(*grpc.ClientConn) // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 99e1e5b36c..b66dcb2132 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,15 +47,11 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( @@ -70,23 +66,6 @@ const ( txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second -) - var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer @@ -94,7 +73,11 @@ var addressDialer = func(address string) func(context.Context, string, string) ( } } -var newNetResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -104,7 +87,7 @@ var newNetResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: addressDialer(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -142,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = newNetResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -161,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. 
type deadResolver struct{} @@ -178,7 +151,7 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn @@ -223,29 +196,27 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var timer *time.Timer + var waitTime time.Duration if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - timer = newTimerDNSResRate(minDNSResRate) + waitTime = internal.MinResolutionRate select { case <-d.ctx.Done(): - timer.Stop() return case <-d.rn: } } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + waitTime = backoff.DefaultExponential.Backoff(backoffIndex) backoffIndex++ } select { case <-d.ctx.Done(): - timer.Stop() return - case <-timer.C: + case <-internal.TimeAfterFunc(waitTime): } } } @@ -387,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -397,7 +368,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if port == "" { // If the port field is empty (target ends with colon), e.g. "[::1]:", // this is an error. - return "", "", errEndsWithColon + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 0000000000..c7fc557d00 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overridden from tests.
+type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum rate at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go new file mode 100644 index 0000000000..aeffd3e1c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go @@ -0,0 +1,29 @@ +//go:build !unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 0000000000..078137b7fd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keepalive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 17f7a21b5a..a9d70e2a16 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown.
@@ -347,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. - - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -370,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { ht.Close(errors.New("request is done processing")) }() - req := ht.req - - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - for _, sh := range ht.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + req := ht.req + s := &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d6f5c49358..59f67655a8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +263,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -493,8 +494,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 6fa1eb4199..680c9eba0b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. 
maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. - t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -334,7 +323,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() - close(t.writerDone) + close(t.loopyWriterDone) }() go t.keepalive() return t, nil @@ -342,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +358,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +501,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. 
@@ -592,18 +582,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -629,8 +607,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream)) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + <-t.loopyWriterDone + close(t.readerDone) + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -664,7 +645,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -1242,10 +1223,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. @@ -1311,10 +1288,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1397,11 +1370,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1433,10 +1406,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1461,6 +1436,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. 
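The hunks above fold the server transport's separate remote address, local address, and auth info into a single peer.Peer exposed through the new Peer() method, which the server attaches to each RPC context. A minimal sketch of reading it from application code, assuming nothing beyond the public peer package (the helper name and output are illustrative, not from the diff):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/peer"
)

// logPeer is a hypothetical helper a handler or interceptor could call to
// inspect the peer information the server now derives from the transport's
// peer.Peer, including the LocalAddr field introduced by this change.
func logPeer(ctx context.Context) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		fmt.Println("no peer information attached to this context")
		return
	}
	// Addr is the remote address; LocalAddr is the address the connection
	// was accepted on.
	fmt.Printf("RPC from %v to %v\n", p.Addr, p.LocalAddr)
}

func main() {
	// Outside of an RPC there is no peer in the context, so this prints the
	// fallback message; inside a handler it would print both addresses.
	logPeer(context.Background())
}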
-func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 4159619878..24fa103257 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aac056e723..b7b8fec180 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -265,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -425,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -437,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. 
// Server side only. // This should not be called in parallel to other data writes. @@ -698,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream)) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -717,8 +731,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf12a..4944682576 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -153,14 +153,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +205,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,17 +222,16 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insenitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219ffb..a821ff9b2b 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -32,6 +32,8 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. 
// It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 236837f415..bf56faa76d 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -37,7 +37,6 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool - idle bool blockingCh chan struct{} picker balancer.Picker statsHandlers []stats.Handler // to record blocking picker calls @@ -53,11 +52,7 @@ func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. + if pw.done { pw.mu.Unlock() return } @@ -210,23 +205,15 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.blockingCh = make(chan struct{}) - pw.idle = false } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 2e9cf66b4a..5128f9364d 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" @@ -65,19 +64,6 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - if !envconfig.PickFirstLBConfig { - // Prior to supporting loadbalancing configuration, the pick_first LB - // policy did not implement the balancer.ConfigParser interface. This - // meant that if a non-empty configuration was passed to it, the service - // config unmarshaling code would throw a warning log, but would - // continue using the pick_first LB policy. The code below ensures the - // same behavior is retained if the env var is not set. - if string(js) != "{}" { - logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) - } - return nil, nil - } - var cfg pfConfig if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 0000000000..14aa6f20ae --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index 804be887de..ada5b9bb79 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. 
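The EndpointMap introduced above is keyed on the unordered set of address strings within an endpoint, as its doc comments describe. A short usage sketch built only from the API added in this hunk (the addresses and stored values are made up):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap()

	// Same address set in a different order: per the Equal logic above,
	// these two endpoints resolve to the same key.
	a := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "10.0.0.1:80"}, {Addr: "10.0.0.2:80"}}}
	b := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "10.0.0.2:80"}, {Addr: "10.0.0.1:80"}}}

	em.Set(a, "backend-a")
	if v, ok := em.Get(b); ok {
		fmt.Println(v) // "backend-a", found via the unordered address set
	}
	fmt.Println(em.Len()) // 1

	em.Delete(b)
	fmt.Println(em.Len()) // 0
}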
+func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 11384e228e..bd1c7d01b7 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -240,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -286,6 +281,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index d683305608..0000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,247 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. 
- if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done() - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. -func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. 
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 0000000000..c79bab1214 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. 
+ cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. +func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. 
+func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8f60d42143..682fa1831e 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -70,6 +70,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } @@ -81,6 +85,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") @@ -139,7 +144,8 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan func() + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -578,11 +584,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { // options are used: StatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. 
Will be deleted in +// v1.60.0 or later. func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.recvBufferPool = bufferPool }) @@ -616,15 +624,14 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } } -func (s *Server) stopServerWorkers() { - close(s.serverWorkerChannel) -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -806,6 +813,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. 
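The Serve documentation added above spells out a two-step recipe for keeping OS-default TCP keepalives. A rough sketch of that recipe, assuming a plain TCP listener (the wrapper type, address, and error handling are illustrative, not part of the diff):

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

// keepaliveListener re-enables SO_KEEPALIVE on accepted connections while
// leaving keepalive time and interval at the OS defaults, following the two
// steps in the Serve documentation above.
type keepaliveListener struct {
	net.Listener
}

func (l keepaliveListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := c.(*net.TCPConn); ok {
		// SetKeepAlive(true) only toggles SO_KEEPALIVE; it does not touch
		// the OS defaults for keepalive time and interval.
		if err := tc.SetKeepAlive(true); err != nil {
			c.Close()
			return nil, err
		}
	}
	return c, nil
}

func main() {
	// A negative KeepAlive keeps the Go standard library from overriding the
	// OS defaults (and from enabling keepalives itself).
	lc := net.ListenConfig{KeepAlive: -1}
	lis, err := lc.Listen(context.Background(), "tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	// Register services here before serving.
	if err := s.Serve(keepaliveListener{lis}); err != nil {
		log.Fatalf("serve: %v", err)
	}
}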
func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -917,7 +936,7 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } @@ -971,18 +990,29 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } + + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) - + st.HandleStreams(ctx, func(stream *transport.Stream) { streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() s.handleStream(st, stream) } @@ -996,7 +1026,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } go f() }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1040,7 +1069,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1689,6 +1718,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ctx := stream.Context() + ctx = contextWithServer(ctx, s) var ti *traceInfo if EnableTracing { tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) @@ -1697,7 +1727,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str tr: tr, firstLine: firstLine{ client: false, - remoteAddr: t.RemoteAddr(), + remoteAddr: t.Peer().Addr, }, } if dl, ok := ctx.Deadline(); ok { @@ -1731,6 +1761,22 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { @@ -1820,62 +1866,68 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. 
func (s *Server) Stop() { - s.quit.Fire() - - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - - s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() - s.mu.Unlock() - - for lis := range listeners { - lis.Close() - } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } - } - if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() - } - - s.mu.Lock() - if s.events != nil { - s.events.Finish() - s.events = nil - } - s.mu.Unlock() + s.stop(false) } // GracefulStop stops the gRPC server gracefully. It stops the server from // accepting new connections and RPCs and blocks until all the pending RPCs are // finished. func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { s.quit.Fire() defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for lis := range s.lis { - lis.Close() + for len(s.conns) != 0 { + s.cv.Wait() } - s.lis = nil + s.conns = nil + + if s.opts.numServerWorkers > 0 { + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if s.events != nil { + s.events.Finish() + s.events = nil + } +} + +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } + } +} + +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1884,22 +1936,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil - } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1913,11 +1957,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. 
+func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 6d2cadd79a..dc2cea59c9 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.59.0" +const Version = "1.60.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bb480f1f9c..896dc38f50 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell @@ -77,12 +76,16 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' # - Do not use "interface{}"; use "any" instead. git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' @@ -94,15 +97,14 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . 
-name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -110,7 +112,6 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ @@ -119,94 +120,73 @@ for MOD_FILE in $(find . -name 'go.mod'); do done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. -# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. -not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. 
+BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. - if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX TODO: Remove the below deprecation usages: +CloseNotifier +Roots.Subjects +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 5f28148d80..f47902371a 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -23,7 +24,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -37,7 +38,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields are ignored. + // If DiscardUnknown is set, unknown fields and enum name values are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -47,9 +48,13 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. 
+ RecursionLimit int } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. @@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { // unmarshalMessage unmarshals a message into the given protoreflect.Message. func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field if err != nil { return err } - m.Set(fd, val) + if val.IsValid() { + m.Set(fd, val) + } return nil } @@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. } case protoreflect.EnumKind: - if v, ok := unmarshalEnum(tok, fd); ok { + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { return v, nil } @@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. @@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { return protoreflect.ValueOfEnum(enumVal.Number()), true } + if discardUnknown { + return protoreflect.Value{}, true + } case json.Number: if n, ok := tok.Int(32); ok { @@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc if err != nil { return err } - list.Append(val) + if val.IsValid() { + list.Append(val) + } } } @@ -609,8 +628,9 @@ Loop: if err != nil { return err } - - mmap.Set(pkey, pval) + if pval.IsValid() { + mmap.Set(pkey, pval) + } } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 21d5d2cb18..ae71007c18 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -6,6 +6,6 @@ // format. It follows the guide at // https://protobuf.dev/programming-guides/proto3#json. // -// This package produces a different output than the standard "encoding/json" +// This package produces a different output than the standard [encoding/json] // package, which does not operate correctly on protocol buffer messages. 
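As a usage sketch of the two decode options touched above (assuming the vendored protobuf v1.32.0 API; structpb is used only as a convenient message type for arbitrary JSON):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Deliberately nested input to exercise the new RecursionLimit guard.
	in := []byte(`{"a": {"b": {"c": {"d": 1}}}}`)

	var s structpb.Struct
	err := protojson.UnmarshalOptions{
		DiscardUnknown: true, // unknown fields and unknown enum names are skipped instead of erroring
		RecursionLimit: 4,    // zero means the package default; small values reject deeply nested input
	}.Unmarshal(in, &s)
	fmt.Println(err) // with a limit this small, an "exceeded max recursion depth" error is expected
}

Leaving RecursionLimit at zero applies the library's default cap rather than unbounded recursion, which is the new protection this change introduces.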
package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 66b95870e9..3f75098b6f 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -31,7 +31,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in JSON format using default options. +// Marshal writes the given [proto.Message] in JSON format using default options. // Do not depend on the output being stable. It may change over time across // different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -81,6 +81,25 @@ type MarshalOptions struct { // ╚═══════╧════════════════════════════════════════╝ EmitUnpopulated bool + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + // Resolver is used for looking up types when expanding google.protobuf.Any // messages. If nil, this defaults to using protoregistry.GlobalTypes. Resolver interface { @@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal marshals the given proto.Message in the JSON format using options in +// Marshal marshals the given [proto.Message] in the JSON format using options in // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { @@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields.
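A hedged sketch of how the new EmitDefaultValues option differs from EmitUnpopulated; apipb.Api is only a stand-in proto3 message with scalar, repeated, and message fields, and the exact output depends on the message used:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/apipb"
)

func main() {
	m := &apipb.Api{} // nothing populated

	all, _ := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(m)
	// EmitUnpopulated also writes presence-tracking fields, e.g. "sourceContext": null.

	defaults, _ := protojson.MarshalOptions{EmitDefaultValues: true}.Marshal(m)
	// EmitDefaultValues writes "", 0, [] and {} for no-presence fields but keeps
	// presence-tracking fields (message fields, optional scalars) omitted.

	fmt.Println(string(all))
	fmt.Println(string(defaults))
}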
-type unpopulatedFieldRanger struct{ protoreflect.Message } +type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() @@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { + if m.skipNull { + continue + } v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { @@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { defer e.EndObject() var fields order.FieldRanger = m - if e.opts.EmitUnpopulated { - fields = unpopulatedFieldRanger{m} + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} } if typeURL != "" { fields = typeURLFieldRanger{fields, typeURL} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 6c37d41744..25329b7692 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error { // Use another decoder to parse the unread bytes for @type field. This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{}} + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -308,48 +308,25 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for objects and arrays. - switch tok.Kind() { - case json.ObjectOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose: - return nil - case json.Name: - // Skip object field value. - if err := d.skipJSONValue(); err != nil { - return err - } + var open int + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") } } - - case json.ArrayOpen: - for { - tok, err := d.Peek() - if err != nil { - return err - } - switch tok.Kind() { - case json.ArrayClose: - d.Read() - return nil - default: - // Skip array item. 
- if err := d.skipJSONValue(); err != nil { - return err - } - } + if open == 0 { + return nil } } - return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 4921b2d4a7..a45f112bce 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 722a7b41df..95967e8112 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -33,7 +33,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default +// Marshal writes the given [proto.Message] in textproto format using default // options. Do not depend on the output being stable. It may change over time // across different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in +// Marshal writes the given [proto.Message] in textproto format using options in // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index f4b4686cf9..e942bc983e 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. 
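For orientation, a small sketch of the low-level protowire API whose doc comments are being re-linked above; it encodes and then decodes one varint field, using the negative-length error convention the comments describe:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Append field number 1 as a varint (wire type 0) with value 150.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 150)

	// Consume functions report a negative length on malformed input;
	// ParseError turns that length into an error value.
	num, typ, n := protowire.ConsumeTag(b)
	if n < 0 {
		panic(protowire.ParseError(n))
	}
	v, m := protowire.ConsumeVarint(b[n:])
	if m < 0 {
		panic(protowire.ParseError(m))
	}
	fmt.Println(num, typ, v) // 1 0 150
}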
package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. 
The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index db5248e1b5..a45625c8d1 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. -// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. 
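The stringer change that follows swaps string accessor names for pre-resolved methods; a minimal standalone sketch of that reflection pattern (illustrative only, with time.Time standing in for a descriptor):

package main

import (
	"fmt"
	"reflect"
	"time"
)

// methodAndName mirrors the shape introduced below: the resolved method plus
// the name it was looked up under, so no string lookup happens at call time.
type methodAndName struct {
	method reflect.Value
	name   string
}

func main() {
	v := reflect.ValueOf(time.Now())
	acc := methodAndName{method: v.MethodByName("Year"), name: "Year"}
	if acc.method.IsValid() {
		fmt.Println(acc.name, "=", acc.method.Call(nil)[0].Interface())
	}
}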
-var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{allowMulti: allowMulti} + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { case protoreflect.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
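The per-descriptor accessor lists above are plain protoreflect methods; as a point of reference, a short sketch walking a message descriptor's fields through the public API (durationpb is just a handy concrete message):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		// Name, Number, Kind, Cardinality, ... are the same accessors the
		// formatter prints.
		fmt.Println(fd.Name(), fd.Number(), fd.Kind(), fd.Cardinality())
	}
}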
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. 
+ record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 7c3689baee..193c68e8f9 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -21,11 +21,26 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
@@ -44,6 +59,7 @@ type ( } FileL1 struct { Syntax protoreflect.Syntax + Edition Edition // Only used if Syntax == Editions Path string Package protoreflect.FullName @@ -51,12 +67,35 @@ type ( Messages Messages Extensions Extensions Services Services + + EditionFeatures FileEditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } + + FileEditionFeatures struct { + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + } ) func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } @@ -210,6 +249,9 @@ type ( ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor + + // Edition features. + Presence bool } Oneof struct { @@ -273,6 +315,9 @@ func (fd *Field) HasJSONName() bool { return fd.L1.StringNam func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + } return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 136f1b2157..8f94230ea1 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,6 +12,12 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -81,7 +87,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 - FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 ) // Names for google.protobuf.DescriptorProto. 
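The Edition plumbing and the editions-aware HasPresence rule above surface through the existing public reflection API; a hedged sketch of how a caller might branch on it (durationpb is a stand-in, and its proto3 file simply does not take the editions branch):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	file := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()
	if file.Syntax() == protoreflect.Editions {
		fmt.Println("editions file: presence follows the per-field feature")
	} else {
		fmt.Println("syntax:", file.Syntax()) // proto3 for duration.proto
	}
}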
@@ -184,10 +190,12 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -195,6 +203,7 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -212,29 +221,26 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = 
"google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -478,6 +484,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -500,6 +507,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -525,6 +533,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -547,6 +556,7 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -554,6 +564,7 @@ const ( 
MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -564,6 +575,7 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -584,8 +596,9 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" - FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -597,8 +610,9 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" - FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -613,8 +627,9 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 - FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -642,6 +657,27 @@ const ( FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Names for google.protobuf.FieldOptions.EditionDefault. 
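These generated name and number constants back the descriptorpb types; options such as the debug_redact flag listed above are read at runtime through a descriptor's Options() message. A sketch, assuming the regenerated descriptorpb that ships with this protobuf version (safe even without options set, since generated getters tolerate nil receivers):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().Fields().Get(0)
	opts, _ := fd.Options().(*descriptorpb.FieldOptions)
	fmt.Println(fd.Name(), "debug_redact:", opts.GetDebugRedact())
}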
+const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -650,13 +686,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -671,11 +710,13 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -684,6 +725,7 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -696,15 +738,21 @@ const ( // Field names for google.protobuf.EnumValueOptions. 
const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -716,15 +764,18 @@ const ( // Field names for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -739,10 +790,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -750,6 +803,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -816,6 +870,120 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. 
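The FeatureSet names above are how the runtime identifies the new editions feature messages; once descriptorpb is linked in, the same full name resolves through the public registry. A sketch, assuming the regenerated descriptorpb bundled with this version:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // registers descriptor.proto, including FeatureSet
)

func main() {
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("google.protobuf.FeatureSet")
	if err != nil {
		fmt.Println("not registered:", err)
		return
	}
	fmt.Println(d.FullName()) // google.protobuf.FeatureSet
}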
+const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.SourceCodeInfo. 
const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63eb..f55dc01e3a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 
0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func 
consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 61c483fac0..2ab2c62978 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -206,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName // Obtain a list of oneof wrapper types. var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 4f5fb67a0d..629bacdced 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -192,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 4c491bdf48..517e94434c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
+ in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index ee0e0573e3..4b020e3116 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go similarity index 96% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index 61a84d3418..a008acd090 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 0000000000..60166f2ba3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
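// Illustrative sketch (not from the vendored sources): the codec_gen.go and
// pointer_*.go hunks above pre-size a repeated field before decoding a packed
// payload. For varints, every byte with the high (continuation) bit clear
// terminates one value, so counting such bytes gives the element count; the
// slice is then grown once instead of reallocating inside the decode loop.
// Names below (countVarints, growInt64s) are hypothetical stand-ins.
package main

import "fmt"

// countVarints reports how many varint-encoded values a packed payload holds.
func countVarints(b []byte) int {
	count := 0
	for _, v := range b {
		if v < 0x80 { // last byte of a varint has the continuation bit clear
			count++
		}
	}
	return count
}

// growInt64s returns s with addCap extra capacity, preserving its contents,
// mirroring the growInt64Slice helpers added above.
func growInt64s(s []int64, addCap int) []int64 {
	out := make([]int64, len(s), len(s)+addCap)
	copy(out, s)
	return out
}

func main() {
	// 1, 300 and 2 encoded as protobuf varints: 0x01, 0xAC 0x02, 0x02.
	packed := []byte{0x01, 0xAC, 0x02, 0x02}
	n := countVarints(packed)
	s := growInt64s(nil, n)
	fmt.Println(n, cap(s)) // 3 3
}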
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 0999f29d50..d8f48faffa 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 31 + Minor = 32 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 48d47946bb..e5b03b5677 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -69,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. 
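// Illustrative sketch (not part of the vendored diff): the new
// strings_unsafe_go121.go above moves the runtime to the unsafe.String /
// unsafe.Slice helpers available in recent Go releases. A minimal standalone
// example of that zero-copy conversion pattern, assuming the caller never
// mutates the shared backing bytes:
package main

import (
	"fmt"
	"unsafe"
)

// bytesToString aliases b as a string without copying.
// The caller must treat b as immutable afterwards.
func bytesToString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

// stringToBytes aliases s as a byte slice without copying.
// The returned slice must not be modified.
func stringToBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := []byte("hello")
	s := bytesToString(b)
	fmt.Println(s, len(stringToBytes(s))) // hello 5
}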
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index ec71e717fe..80ed16a0c2 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // // # Basic message operations // -// • Clone makes a deep copy of a message. +// - [Clone] makes a deep copy of a message. // -// • Merge merges the content of a message into another. +// - [Merge] merges the content of a message into another. // -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Reset clears the content of a message. +// - [Reset] clears the content of a message. // -// • CheckInitialized reports whether all required fields in a message are set. +// - [CheckInitialized] reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. @@ -57,29 +57,29 @@ // // # Extension accessors // -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // // # Related packages // -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. 
+// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index bf7f816d0e..4fed202f9f 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -129,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda86..17899a3a76 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool { } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index d761ab331d..3c6fe57807 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? 
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b10..7543ee6b25 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. -// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb12050..baa0cc6218 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,11 +3,11 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc @@ -24,11 +24,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. // The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +61,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. +// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. 
func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot f.L1.Syntax = protoreflect.Proto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + if f.L1.Syntax == protoreflect.Editions { + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) + } f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1afe..aff6fd4900 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -137,6 +137,30 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd) + // We reuse the existing field because the old option `[packed = + // true]` is mutually exclusive with the editions feature. + if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + f.L1.HasPacked = true + f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd) + } + + // We pretend this option is always explicitly set because the only + // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 + // or to return the appropriate default. 
+ // When using editions we either parse the option or resolve the + // appropriate default here (instead of later when this option is + // requested from the descriptor). + // In proto2/proto3 syntax HasEnforceUTF8 might be false. + f.L1.HasEnforceUTF8 = true + f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd) + + if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) { + f.L1.Kind = protoreflect.GroupKind + } + } } return fs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go new file mode 100644 index 0000000000..7352926cab --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -0,0 +1,177 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + _ "embed" + "fmt" + "os" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2 + SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 +) + +//go:embed editions_defaults.binpb +var binaryEditionDefaults []byte +var defaults = &descriptorpb.FeatureSetDefaults{} +var defaultsCacheMu sync.Mutex +var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) + +func init() { + err := proto.Unmarshal(binaryEditionDefaults, defaults) + if err != nil { + fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) + os.Exit(1) + } +} + +func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { + return filedesc.Edition(epb) +} + +func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { + switch ed { + case filedesc.EditionUnknown: + return descriptorpb.Edition_EDITION_UNKNOWN + case filedesc.EditionProto2: + return descriptorpb.Edition_EDITION_PROTO2 + case filedesc.EditionProto3: + return descriptorpb.Edition_EDITION_PROTO3 + case filedesc.Edition2023: + return descriptorpb.Edition_EDITION_2023 + default: + panic(fmt.Sprintf("unknown value for edition: %v", ed)) + } +} + +func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { + defaultsCacheMu.Lock() + defer defaultsCacheMu.Unlock() + if def, ok := defaultsCache[ed]; ok { + return def + } + edpb := toEditionProto(ed) + if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { + // This should never happen protodesc.(FileOptions).New would fail when + // initializing the file descriptor. + // This most likely means the embedded defaults were not updated. + fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) + os.Exit(1) + } + fs := defaults.GetDefaults()[0].GetFeatures() + // Using a linear search for now. + // Editions are guaranteed to be sorted and thus we could use a binary search. + // Given that there are only a handful of editions (with one more per year) + // there is not much reason to use a binary search. 
+ for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fs = def.GetFeatures() + } else { + break + } + } + defaultsCache[ed] = fs + return fs +} + +func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.FieldPresence == nil { + return fileDesc.L1.EditionFeatures.IsFieldPresence + } + return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED || + fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT +} + +func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.RepeatedFieldEncoding == nil { + return fileDesc.L1.EditionFeatures.IsPacked + } + return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED +} + +func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.Utf8Validation == nil { + return fileDesc.L1.EditionFeatures.IsUTF8Validated + } + return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY +} + +func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.MessageEncoding == nil { + return fileDesc.L1.EditionFeatures.IsDelimitedEncoded + } + return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. 
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { + dfs := getFeatureSetFor(fd.L1.Edition) + if fs == nil { + fs = &descriptorpb.FeatureSet{} + } + + var fieldPresence descriptorpb.FeatureSet_FieldPresence + if fp := fs.FieldPresence; fp != nil { + fieldPresence = *fp + } else { + fieldPresence = *dfs.FieldPresence + } + fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED || + fieldPresence == descriptorpb.FeatureSet_EXPLICIT + + var enumType descriptorpb.FeatureSet_EnumType + if et := fs.EnumType; et != nil { + enumType = *et + } else { + enumType = *dfs.EnumType + } + fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN + + var respeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding + if rfe := fs.RepeatedFieldEncoding; rfe != nil { + respeatedFieldEncoding = *rfe + } else { + respeatedFieldEncoding = *dfs.RepeatedFieldEncoding + } + fd.L1.EditionFeatures.IsPacked = respeatedFieldEncoding == descriptorpb.FeatureSet_PACKED + + var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation + if utf8val := fs.Utf8Validation; utf8val != nil { + isUTF8Validated = *utf8val + } else { + isUTF8Validated = *dfs.Utf8Validation + } + fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY + + var messageEncoding descriptorpb.FeatureSet_MessageEncoding + if me := fs.MessageEncoding; me != nil { + messageEncoding = *me + } else { + messageEncoding = *dfs.MessageEncoding + } + fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED + + var jsonFormat descriptorpb.FeatureSet_JsonFormat + if jf := fs.JsonFormat; jf != nil { + jsonFormat = *jf + } else { + jsonFormat = *dfs.JsonFormat + } + fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb new file mode 100644 index 0000000000..1a8610a843 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb @@ -0,0 +1,4 @@ + +  (0æ +  (0ç +  (0è æ(è \ No newline at end of file diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a7c5ceffc9..9d6e05420f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a // google.protobuf.FileDescriptorProto message. 
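// Illustrative sketch (not part of the vendored diff): getFeatureSetFor in
// editions.go above walks a list of per-edition feature defaults that is
// sorted by ascending edition and keeps the last entry whose edition does not
// exceed the requested one. The same lookup pattern with simplified,
// hypothetical types (featureDefault stands in for the descriptorpb messages):
package main

import "fmt"

type edition int

type featureDefault struct {
	edition  edition
	features string // stand-in for a *descriptorpb.FeatureSet
}

// featuresFor returns the defaults of the newest edition <= ed.
// defaults must be sorted by ascending edition and be non-empty.
func featuresFor(defaults []featureDefault, ed edition) string {
	fs := defaults[0].features
	for _, d := range defaults {
		if d.edition <= ed {
			fs = d.features
		} else {
			break
		}
	}
	return fs
}

func main() {
	defaults := []featureDefault{
		{998, "proto2 defaults"},
		{999, "proto3 defaults"},
		{1000, "edition 2023 defaults"},
	}
	fmt.Println(featuresFor(defaults, 1000)) // edition 2023 defaults
}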
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. 
func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 55aa14922b..ec6572dfda 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. // Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. 
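// Illustrative sketch (not part of the vendored diff): the protoreflect doc
// comments above describe obtaining a reflective Message from any v2
// proto.Message via its ProtoReflect method. A minimal example using a
// well-known wrapper type to inspect the populated fields:
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := wrapperspb.String("hello")

	// ProtoReflect returns a reflective view over the concrete message.
	m := msg.ProtoReflect()
	fmt.Println("message:", m.Descriptor().FullName())

	// Range visits every populated field with its descriptor and value.
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fmt.Printf("  %s = %v\n", fd.Name(), v.Interface())
		return true
	})
}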
// // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┠// │ │ @@ -90,24 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. -// Just as how Go's reflect.Type is a reflective description of a Go type, -// a MessageType is a reflective description of a Go type for a protobuf message. +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. // -// • A MessageDescriptor describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a MessageType -// from just a MessageDescriptor, you can consider looking up the message type -// in the global registry using protoregistry.GlobalTypes.FindMessageByName -// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. // -// • A Message is a reflective view over a concrete message instance. -// Generated messages implement ProtoMessage, which can convert to a Message. -// Just as how Go's reflect.Value is a reflective view over a Go value, -// a Message is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the ProtoReflect method is similar to -// calling reflect.ValueOf, and the Message.Interface method is similar to -// calling reflect.Value.Interface. +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. // // ┌── TypeDescriptor() ──┠┌───── Descriptor() ─────┠// │ V │ V @@ -119,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. 
+// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. // -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. // -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. package protoreflect import ( @@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement. // This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -151,8 +153,9 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 ) // IsValid reports whether the syntax is valid. @@ -436,7 +439,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. +// with a '.' delimiter placed between each [Name]. // // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" @@ -480,7 +483,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. +// A single segment FullName is the [Name] itself. 
func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 717b106f3d..0c045db6ab 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 13: + case 14: b = p.appendSingularField(b, "edition", nil) } return b @@ -180,6 +180,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,6 +242,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -285,6 +289,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -330,6 +336,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -361,16 +369,39 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) - case 18: - b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -422,6 +453,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, 
"uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -433,6 +466,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -446,6 +481,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -461,12 +500,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -491,8 +545,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) - case 4: - b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 3867470d30..60ff62b4c8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. 
// // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a proto.Message] value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ type MessageDescriptor interface { } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ type MessageType interface { Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. // It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ type FieldDescriptor interface { // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics. 
If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for a extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -532,7 +532,7 @@ type ExtensionType interface { // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. +// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -548,7 +548,7 @@ type EnumDescriptor interface { } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -610,7 +610,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor]. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index 37601b7819..a7b0d06ff3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// Accessor/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message. 
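Reviewer aside, not part of this diff: a short sketch of the EnumDescriptor/EnumType relationship described above, using structpb.NullValue as an arbitrary generated enum.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Every generated enum exposes its EnumDescriptor.
	ed := structpb.NullValue(0).Descriptor()
	fmt.Println(ed.FullName())             // google.protobuf.NullValue
	fmt.Println(ed.Values().Get(0).Name()) // NULL_VALUE
}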
-// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. -// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ type List interface { } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. -// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 591652541f..654599d449 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since Value does not contain an enum descriptor, +// Since [Value] does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. // -// - Message values are equal if they belong to the same message descriptor, +// - [Message] values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown fields values. 
// -// - Lists are equal if they are the same length and +// - [List] values are equal if they are the same length and // each corresponding element is equal. // -// - Maps are equal if they have the same set of keys and +// - [Map] values are equal if they have the same set of keys and // the corresponding value for each key is equal. func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 08e5ef73fc..1603097311 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // â•”â•â•â•â•â•â•â•â•â•â•â•â•╤â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•— // â•‘ Go type │ Protobuf kind â•‘ @@ -31,22 +31,22 @@ import ( // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch. -// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// List, Map, and Message Values are called "composite" values. +// [List], [Map], and [Message] Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value updates the that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, +// A composite value acquired with a Mutable method, such as [Message.Mutable], // always references the source object. // // For example: @@ -65,7 +65,7 @@ import ( // // appending to the List here may or may not modify the message. // list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as Message.Get, may return an "empty, read-only" +// Some operations, such as [Message.Get], may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. 
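Reviewer aside, illustrative only: the Message accessors and Value semantics documented in these hunks in one small sketch (wrapperspb is just a convenient message with a single string field).

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := wrapperspb.String("hello")
	m := msg.ProtoReflect()

	// Accessors/mutators on a Message are keyed by FieldDescriptor and
	// traffic in Value; supplying a Value of the wrong type panics.
	fd := m.Descriptor().Fields().ByName("value")
	m.Set(fd, protoreflect.ValueOfString("world"))
	fmt.Println(msg.GetValue()) // world

	// Scalar Values compare by content, per Value.Equal above.
	fmt.Println(protoreflect.ValueOfInt64(42).Equal(protoreflect.ValueOfInt64(42))) // true
}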
func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey { } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). +// The following shows what Go type is used to represent each proto [Kind]: // // â•”â•â•â•â•â•â•â•â•â•╤â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•— // â•‘ Go type │ Protobuf kind â•‘ @@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey { // â•‘ string │ StringKind â•‘ // ╚â•â•â•â•â•â•â•â•â•â•§â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â•â• // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value]. 
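Reviewer aside: the MapKey round trip described in the doc comment above, as a runnable sketch.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// A MapKey is constructed and accessed through a Value; only bool,
	// integer, and string kinds are valid key types.
	k := protoreflect.ValueOf("hash").MapKey()
	fmt.Println(k.String())          // hash
	fmt.Println(k.Value().IsValid()) // true
}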
func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go similarity index 97% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 702ddf22a2..b1fdbe3e8e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 0000000000..4354701117 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
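Reviewer aside: the new value_unsafe_go121.go variant builds string/bytes headers with the Go 1.20+ unsafe helpers instead of reflect-style headers. A minimal, assumption-labeled sketch of those standard-library helpers only (not code from the vendored file):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	s := "protoreflect"
	p := unsafe.StringData(s)         // *byte aliasing the string's backing bytes
	again := unsafe.String(p, len(s)) // rebuild a string header from pointer + length
	fmt.Println(again == s)           // true

	b := []byte{1, 2, 3}
	q := unsafe.SliceData(b) // *byte aliasing the slice's backing array
	fmt.Println(unsafe.Slice(q, len(b))[2]) // 3
}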
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index aeb5597744..6267dc52a6 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and lookup // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or lookup a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or lookup a type by name. package protoregistry @@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. // This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -451,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up a extension field by the field's full name. 
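Reviewer aside, illustrative only: the Files/Types registries documented above are normally consumed through the global registries; importing any generated package registers its types as a side effect.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/timestamppb" // registers google.protobuf.Timestamp globally
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Timestamp")
	if err != nil {
		panic(err) // (nil, NotFound) if the type was never registered
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Timestamp
}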
// Note that this is the full name of the field as determined by @@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E // FindExtensionByNumber looks up a extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 04c00f737c..38daa858d0 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,94 @@ import ( sync "sync" ) +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + Edition_EDITION_2023 Edition = 1000 + // Placeholder editions for testing feature resolution. 
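Reviewer aside: FindMessageByURL, touched in this hunk, accepts google.protobuf.Any-style type URLs; a small sketch under the same assumptions as above.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration globally
)

func main() {
	// Anything before and including the last '/' in the URL is ignored.
	mt, err := protoregistry.GlobalTypes.FindMessageByURL("type.googleapis.com/google.protobuf.Duration")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.Duration
	_ = mt.New()                            // a freshly allocated, empty Duration
}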
These should not be + // used or relyed on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 +) + +// Enum value maps for Edition. +var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -80,11 +168,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -125,9 +213,10 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 + // Group type is deprecated and not supported after google.protobuf. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. + // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2. 
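Reviewer aside: the new Edition enum added above is an ordinary generated proto2 enum; its values and name/value maps can be used directly.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	e := descriptorpb.Edition_EDITION_2023
	fmt.Println(e.String(), e.Number())                      // EDITION_2023 1000
	fmt.Println(descriptorpb.Edition_name[998])              // EDITION_PROTO2
	fmt.Println(descriptorpb.Edition_value["EDITION_2023"])  // 1000
}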
@@ -195,11 +284,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -226,21 +315,24 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in google.protobuf. In proto3 and Editions + // it's explicitly prohibited. In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, } ) @@ -255,11 +347,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -316,11 +408,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -382,11 +474,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -444,11 +536,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return 
&file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -506,11 +598,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -590,11 +682,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -652,11 +744,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -678,6 +770,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_NONE FeatureSet_Utf8Validation = 1 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 1: "NONE", + 2: "VERIFY", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "NONE": 1, + "VERIFY": 2, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
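Reviewer aside: each FeatureSet_* enum introduced in these hunks ships the usual generated name/value maps and String methods, e.g.:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fmt.Println(descriptorpb.FeatureSet_Utf8Validation_name[2])   // VERIFY
	fmt.Println(descriptorpb.FeatureSet_EnumType_value["CLOSED"]) // 2
	fmt.Println(descriptorpb.FeatureSet_PACKED.String())          // PACKED
}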
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -716,11 +1165,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -739,7 +1188,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -822,8 +1271,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file, which is an opaque string. 
- Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + // The edition of the proto file. + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -942,11 +1391,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() string { +func (x *FileDescriptorProto) GetEdition() Edition { if x != nil && x.Edition != nil { return *x.Edition } - return "" + return Edition_EDITION_UNKNOWN } // Describes a message type. @@ -1079,13 +1528,14 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // go/protobuf-stripping-extension-declarations - // Like Metadata, but we use a repeated field to hold all extension - // declarations. This should avoid the size increases of transforming a large - // extension range into small ranges in generated binaries. + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1141,6 +1591,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1772,6 +2229,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1963,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2039,11 +2505,13 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. 
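Reviewer aside: FileDescriptorProto.edition changes here from an opaque string to the Edition enum, so GetEdition now returns a descriptorpb.Edition. A sketch of reading it off a well-known file descriptor (EDITION_UNKNOWN is expected for a plain proto3 file):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	fdp := protodesc.ToFileDescriptorProto(durationpb.File_google_protobuf_duration_proto)
	fmt.Println(fdp.GetName(), fdp.GetSyntax(), fdp.GetEdition())
	// google/protobuf/duration.proto proto3 EDITION_UNKNOWN
}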
// - // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // TODO This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2123,6 +2591,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2147,7 +2622,9 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2205,11 +2682,12 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. 
+ Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2320,14 +2798,6 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { - if x != nil && x.Target != nil { - return *x.Target - } - return FieldOptions_TARGET_TYPE_UNKNOWN -} - func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { if x != nil { return x.Targets @@ -2335,6 +2805,20 @@ func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { return nil } +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { + if x != nil { + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2348,6 +2832,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2384,6 +2870,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2409,11 +2902,13 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. - // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // TODO Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
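Reviewer aside, illustrative only: the new FieldOptions getters (GetFeatures, GetEditionDefaults, ...) behave like the existing ones and are nil-safe; options are reached from a field descriptor via a type assertion on Options(). The field chosen below is arbitrary.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Options() returns a proto.Message holding a *descriptorpb.FieldOptions,
	// possibly a typed nil pointer when no options are present.
	fd := (&structpb.Value{}).ProtoReflect().Descriptor().Fields().ByName("null_value")
	opts := fd.Options().(*descriptorpb.FieldOptions)
	fmt.Println(opts.GetDeprecated(), opts.GetDebugRedact(), len(opts.GetEditionDefaults()))
	// false false 0
}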
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2477,6 +2972,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2495,13 +2997,20 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2543,6 +3052,20 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2556,6 +3079,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2602,6 +3127,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2628,6 +3160,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. 
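Reviewer aside: the new EnumValueOptions.debug_redact field follows the usual proto2 default-getter pattern added in this hunk.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	o := &descriptorpb.EnumValueOptions{}
	fmt.Println(o.GetDebugRedact()) // false: the declared default applies while unset
	o.DebugRedact = proto.Bool(true)
	fmt.Println(o.GetDebugRedact()) // true
}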
+ Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2684,6 +3218,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2794,6 +3335,171 @@ func (x *UninterpretedOption) GetAggregateValue() string { return "" } +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. +type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. 
+func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { + if x != nil { + return x.Defaults + } + return nil +} + +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN +} + // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { @@ -2855,7 +3561,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +3574,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +3587,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2907,7 +3613,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2920,7 +3626,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2933,7 +3639,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. 
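The FeatureSetDefaults message added above documents its own use: resolution is "a simple search for the closest matching edition, followed by proto merges". A minimal sketch of that search, using only the getters generated in this file and assuming the defaults are already in the strict ascending edition order the comment requires (the helper name resolveFeatureDefaults is illustrative, not the library's actual resolver):

	package featuresketch

	import (
		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/descriptorpb"
	)

	// resolveFeatureDefaults keeps merging defaults until the first edition past the
	// target, so the closest edition at or before the target is merged last and wins.
	func resolveFeatureDefaults(d *descriptorpb.FeatureSetDefaults, edition descriptorpb.Edition) *descriptorpb.FeatureSet {
		resolved := &descriptorpb.FeatureSet{}
		for _, def := range d.GetDefaults() {
			if def.GetEdition() > edition {
				break // defaults are in ascending order; later entries are too new
			}
			if f := def.GetFeatures(); f != nil {
				proto.Merge(resolved, f)
			}
		}
		return resolved
	}

Any Features set explicitly on a file, message, field, or other descriptor would then be merged on top of the resolved defaults in the same last-merge-wins fashion.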
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2956,7 +3662,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2969,7 +3675,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3021,7 +3727,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3740,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,10 +3784,6 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // Deprecated. Please use "repeated". - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. 
@@ -3094,7 +3796,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3107,7 +3809,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3144,14 +3846,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { - if x != nil && x.IsRepeated != nil { - return *x.IsRepeated - } - return false -} - func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3184,7 +3878,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3891,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,6 +3921,61 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. +} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. 
+func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3244,7 +3993,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +4006,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,6 +4036,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. +type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3388,7 +4196,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +4209,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +4222,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -3475,7 +4283,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +4296,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +4309,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
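The getters added above are nil-receiver safe (each checks `x != nil`), so an explicitly set feature can be read straight off an element's options without nil checks at every level. A small illustrative helper (the name explicitPresence and its two-value return are assumptions for this example, not API generated in this file):

	package featuresketch

	import "google.golang.org/protobuf/types/descriptorpb"

	// explicitPresence reports the field_presence feature set directly on a field's
	// options, and whether one was set at all; the UNKNOWN sentinel means the
	// effective value must come from parent scopes or the resolved edition defaults.
	func explicitPresence(fd *descriptorpb.FieldDescriptorProto) (descriptorpb.FeatureSet_FieldPresence, bool) {
		p := fd.GetOptions().GetFeatures().GetFieldPresence()
		return p, p != descriptorpb.FeatureSet_FIELD_PRESENCE_UNKNOWN
	}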
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3550,7 +4358,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3588,527 +4396,687 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 
0x6d, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 
0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, - 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, - 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, - 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 
0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, - 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 
0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, - 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, - 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, - 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, - 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, - 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, - 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, - 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 
0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, - 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, - 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b, + 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, + 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, + 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 
0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, + 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, - 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 
0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 
0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, - 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, - 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, - 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, - 0x68, 0x65, 
0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, - 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, - 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, - 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, - 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, - 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, - 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, - 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, - 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, - 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, - 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 
0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, - 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, - 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, - 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, - 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 
0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, + 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, + 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, + 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, + 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, + 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 
0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, + 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, + 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 
0x28, 0x08, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, - 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, - 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, - 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, - 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, - 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, + 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, + 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, + 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, + 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, + 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, + 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, + 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, + 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, + 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 
0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, + 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, + 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, + 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, + 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, + 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, + 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, + 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 
0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, + 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, + 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, + 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, + 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, + 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, + 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, + 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 
0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, + 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 
0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, + 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, + 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, + 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 
0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, + 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, + 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, + 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 
0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, - 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 
0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x42, 
0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, + 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e, + 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, + 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, + 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, + 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, + 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, + 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, + 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 
0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, + 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, + 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, + 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, + 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, + 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, + 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, + 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, + 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, + 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, + 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, + 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 
0x0a, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, + 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, + 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, + 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, + 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, + 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -4123,103 +5091,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 20: google.protobuf.FileOptions - (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 36: 
google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + 
(*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> 
google.protobuf.ServiceOptions - 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 49, // [49:49] is the sub-list for method output_type - 49, // [49:49] is the sub-list for method input_type - 49, // [49:49] is the sub-list for extension type_name - 49, // [49:49] is the sub-list for extension extendee - 0, // [0:49] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> 
google.protobuf.FieldDescriptorProto + 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> 
google.protobuf.FeatureSet + 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet + 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4475,19 
+5476,21 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo); i { + switch v := v.(*FeatureSet); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields + case 3: + return &v.extensionFields default: return nil } } file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo); i { + switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state case 1: @@ -4499,7 +5502,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { + switch v := v.(*SourceCodeInfo); i { case 0: return &v.state case 1: @@ -4511,7 +5514,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { + switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state case 1: @@ -4523,7 +5526,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions_Declaration); i { + switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state case 1: @@ -4535,7 +5538,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state case 1: @@ -4547,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4559,7 +5562,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -4571,6 +5574,54 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4588,8 +5639,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 10, - NumMessages: 28, + NumEnums: 17, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 580b232f47..9de51be540 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,7 +237,8 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. diff --git a/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws.go b/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws.go index e2212cd105..1e276f237a 100644 --- a/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws.go +++ b/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws.go @@ -1707,7 +1707,7 @@ func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID strin instances, err := c.ec2.DescribeInstances(request) if err != nil { // if err is InstanceNotFound, return false with no error - if isAWSErrorInstanceNotFound(err) { + if IsAWSErrorInstanceNotFound(err) { return false, nil } return false, err @@ -1946,7 +1946,8 @@ func (c *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) } -func isAWSErrorInstanceNotFound(err error) bool { +// IsAWSErrorInstanceNotFound returns true if the specified error is an awserr.Error with the code `InvalidInstanceId.NotFound`. 
+func IsAWSErrorInstanceNotFound(err error) bool {
 	if err == nil {
 		return false
 	}
@@ -4123,13 +4124,13 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
 	if isNLB(annotations) {
 		// Find the subnets that the ELB will live in
-		subnetIDs, err := c.getLoadBalancerSubnets(apiService, internalELB)
+		discoveredSubnetIDs, err := c.getLoadBalancerSubnets(apiService, internalELB)
 		if err != nil {
 			klog.Errorf("Error listing subnets in VPC: %q", err)
 			return nil, err
 		}
 		// Bail out early if there are no subnets
-		if len(subnetIDs) == 0 {
+		if len(discoveredSubnetIDs) == 0 {
 			return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
 		}
@@ -4146,7 +4147,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
 			loadBalancerName,
 			v2Mappings,
 			instanceIDs,
-			subnetIDs,
+			discoveredSubnetIDs,
 			internalELB,
 			annotations,
 		)
@@ -4154,7 +4155,16 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS
 			return nil, err
 		}
-		subnetCidrs, err := c.getSubnetCidrs(subnetIDs)
+		// try to get the ensured subnets of the LBs from AZs
+		var ensuredSubnetIDs []string
+		var subnetCidrs []string
+		for _, az := range v2LoadBalancer.AvailabilityZones {
+			ensuredSubnetIDs = append(ensuredSubnetIDs, *az.SubnetId)
+		}
+		if len(ensuredSubnetIDs) == 0 {
+			return nil, fmt.Errorf("did not find ensured subnets on LB %s", loadBalancerName)
+		}
+		subnetCidrs, err = c.getSubnetCidrs(ensuredSubnetIDs)
 		if err != nil {
 			klog.Errorf("Error getting subnet cidrs: %q", err)
 			return nil, err
diff --git a/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws_fakes.go b/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws_fakes.go
index 84737b1417..a4e45a1f29 100644
--- a/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws_fakes.go
+++ b/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws_fakes.go
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"sort"
+	"strconv"
 	"strings"
 	"github.com/aws/aws-sdk-go/aws"
@@ -47,6 +48,8 @@ type FakeAWSServices struct {
 	asg      *FakeASG
 	metadata *FakeMetadata
 	kms      *FakeKMS
+
+	callCounts map[string]int
 }
 // NewFakeAWSServices creates a new FakeAWSServices
@@ -79,6 +82,8 @@ func NewFakeAWSServices(clusterID string) *FakeAWSServices {
 	tag.Value = aws.String(clusterID)
 	selfInstance.Tags = []*ec2.Tag{&tag}
+	s.callCounts = make(map[string]int)
+
 	return s
 }
@@ -97,6 +102,15 @@ func (s *FakeAWSServices) WithRegion(region string) *FakeAWSServices {
 	return s
 }
+// countCall increments the counter for the given service, api, and resourceID and returns the resulting call count
+func (s *FakeAWSServices) countCall(service string, api string, resourceID string) int {
+	key := fmt.Sprintf("%s:%s:%s", service, api, resourceID)
+	s.callCounts[key]++
+	count := s.callCounts[key]
+	klog.Warningf("call count: %s:%d", key, count)
+	return count
+}
+
 // Compute returns a fake EC2 client
 func (s *FakeAWSServices) Compute(region string) (EC2, error) {
 	return s.ec2, nil
 }
@@ -295,6 +309,7 @@ func (ec2i *FakeEC2Impl) DescribeAvailabilityZones(request *ec2.DescribeAvailabi
 // CreateTags is a mock for CreateTags from EC2
 func (ec2i *FakeEC2Impl) CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
 	for _, id := range input.Resources {
+		callCount := ec2i.aws.countCall("ec2", "CreateTags", *id)
 		if *id == "i-error" {
 			return nil, errors.New("Unable to tag")
 		}
@@ -302,6 +317,17 @@ func (ec2i *FakeEC2Impl) CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTags
 		if *id == "i-not-found" {
 			return nil, awserr.New("InvalidInstanceID.NotFound", "Instance not found", nil)
 		}
+		// return an Instance not found error for the first `n` calls
+		// instance ID should be of the format `i-not-found-count-$N-$SUFFIX`
+		if strings.HasPrefix(*id, "i-not-found-count-") {
+			notFoundCount, err := strconv.Atoi(strings.Split(*id, "-")[4])
+			if err != nil {
+				panic(err)
+			}
+			if callCount < notFoundCount {
+				return nil, awserr.New("InvalidInstanceID.NotFound", "Instance not found", nil)
+			}
+		}
 	}
 	return &ec2.CreateTagsOutput{}, nil
 }
diff --git a/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws_loadbalancer.go b/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws_loadbalancer.go
index 6319df1271..c39ea3de37 100644
--- a/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws_loadbalancer.go
+++ b/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/aws_loadbalancer.go
@@ -138,7 +138,7 @@ func getKeyValuePropertiesFromAnnotation(annotations map[string]string, annotati
 }
 // ensureLoadBalancerv2 ensures a v2 load balancer is created
-func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBalancerName string, mappings []nlbPortMapping, instanceIDs, subnetIDs []string, internalELB bool, annotations map[string]string) (*elbv2.LoadBalancer, error) {
+func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBalancerName string, mappings []nlbPortMapping, instanceIDs, discoveredSubnetIDs []string, internalELB bool, annotations map[string]string) (*elbv2.LoadBalancer, error) {
 	loadBalancer, err := c.describeLoadBalancerv2(loadBalancerName)
 	if err != nil {
 		return nil, err
@@ -165,14 +165,14 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa
 		var allocationIDs []string
 		if eipList, present := annotations[ServiceAnnotationLoadBalancerEIPAllocations]; present {
 			allocationIDs = strings.Split(eipList, ",")
-			if len(allocationIDs) != len(subnetIDs) {
-				return nil, fmt.Errorf("error creating load balancer: Must have same number of EIP AllocationIDs (%d) and SubnetIDs (%d)", len(allocationIDs), len(subnetIDs))
+			if len(allocationIDs) != len(discoveredSubnetIDs) {
+				return nil, fmt.Errorf("error creating load balancer: Must have same number of EIP AllocationIDs (%d) and SubnetIDs (%d)", len(allocationIDs), len(discoveredSubnetIDs))
 			}
 		}
 		// We are supposed to specify one subnet per AZ.
 		// TODO: What happens if we have more than one subnet per AZ?
-		createRequest.SubnetMappings = createSubnetMappings(subnetIDs, allocationIDs)
+		createRequest.SubnetMappings = createSubnetMappings(discoveredSubnetIDs, allocationIDs)
 		for k, v := range tags {
 			createRequest.Tags = append(createRequest.Tags, &elbv2.Tag{
diff --git a/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/tags.go b/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/tags.go
index beaaf0ca7d..9b82436097 100644
--- a/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/tags.go
+++ b/vendor/k8s.io/cloud-provider-aws/pkg/providers/v1/tags.go
@@ -344,7 +344,7 @@ func (c *Cloud) UntagResource(resourceID string, tags map[string]string) error {
 	if err != nil {
 		// An instance not found should not fail the untagging workflow as it
 		// would for tagging, since the target state is already reached.
- if isAWSErrorInstanceNotFound(err) { + if IsAWSErrorInstanceNotFound(err) { klog.Infof("Couldn't find resource when trying to untag it hence skipping it, %v", err) return nil } diff --git a/vendor/k8s.io/cloud-provider/cloud.go b/vendor/k8s.io/cloud-provider/cloud.go index c9a04085f4..d4c11dc664 100644 --- a/vendor/k8s.io/cloud-provider/cloud.go +++ b/vendor/k8s.io/cloud-provider/cloud.go @@ -98,6 +98,8 @@ func DefaultLoadBalancerName(service *v1.Service) string { } // GetInstanceProviderID builds a ProviderID for a node in a cloud. +// Note that if the instance does not exist, we must return ("", cloudprovider.InstanceNotFound) +// cloudprovider.InstanceNotFound should NOT be returned for instances that exist but are stopped/sleeping func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) { instances, ok := cloud.Instances() if !ok { @@ -108,8 +110,11 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types. if err == NotImplemented { return "", err } + if err == InstanceNotFound { + return "", err + } - return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err) + return "", fmt.Errorf("failed to get instance ID from cloud provider: %w", err) } return cloud.ProviderName() + "://" + instanceID, nil } @@ -285,6 +290,7 @@ type Zones interface { } // PVLabeler is an abstract, pluggable interface for fetching labels for volumes +// DEPRECATED: PVLabeler is deprecated in favor of CSI topology feature. type PVLabeler interface { GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) } diff --git a/vendor/k8s.io/cloud-provider/service/helpers/helper.go b/vendor/k8s.io/cloud-provider/service/helpers/helper.go index e363c7db2c..fd436c3d37 100644 --- a/vendor/k8s.io/cloud-provider/service/helpers/helper.go +++ b/vendor/k8s.io/cloud-provider/service/helpers/helper.go @@ -180,5 +180,8 @@ func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { if lhs.Hostname != rhs.Hostname { return false } + if lhs.IPMode != rhs.IPMode { + return false + } return true } diff --git a/vendor/modules.txt b/vendor/modules.txt index e58a46c85d..b84c9933f5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -97,7 +97,7 @@ github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter -# github.com/aws/aws-sdk-go v1.48.15 +# github.com/aws/aws-sdk-go v1.49.13 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -178,7 +178,7 @@ github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/cert-manager/cert-manager v1.13.2 +# github.com/cert-manager/cert-manager v1.13.3 ## explicit; go 1.20 github.com/cert-manager/cert-manager/pkg/apis/acme github.com/cert-manager/cert-manager/pkg/apis/acme/v1 @@ -340,7 +340,7 @@ github.com/go-errors/errors # github.com/go-ini/ini v1.67.0 ## explicit github.com/go-ini/ini -# github.com/go-logr/logr v1.3.0 +# github.com/go-logr/logr v1.4.1 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -485,7 +485,7 @@ github.com/google/s2a-go/stream # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex -# github.com/google/uuid v1.4.0 +# github.com/google/uuid v1.5.0 ## explicit 
diff --git a/vendor/k8s.io/cloud-provider/service/helpers/helper.go b/vendor/k8s.io/cloud-provider/service/helpers/helper.go
index e363c7db2c..fd436c3d37 100644
--- a/vendor/k8s.io/cloud-provider/service/helpers/helper.go
+++ b/vendor/k8s.io/cloud-provider/service/helpers/helper.go
@@ -180,5 +180,8 @@ func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool {
 	if lhs.Hostname != rhs.Hostname {
 		return false
 	}
+	if lhs.IPMode != rhs.IPMode {
+		return false
+	}
 	return true
 }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e58a46c85d..b84c9933f5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -97,7 +97,7 @@ github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes
 github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector
 github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs
 github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter
-# github.com/aws/aws-sdk-go v1.48.15
+# github.com/aws/aws-sdk-go v1.49.13
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
@@ -178,7 +178,7 @@ github.com/beorn7/perks/quantile
 # github.com/blang/semver/v4 v4.0.0
 ## explicit; go 1.14
 github.com/blang/semver/v4
-# github.com/cert-manager/cert-manager v1.13.2
+# github.com/cert-manager/cert-manager v1.13.3
 ## explicit; go 1.20
 github.com/cert-manager/cert-manager/pkg/apis/acme
 github.com/cert-manager/cert-manager/pkg/apis/acme/v1
@@ -340,7 +340,7 @@ github.com/go-errors/errors
 # github.com/go-ini/ini v1.67.0
 ## explicit
 github.com/go-ini/ini
-# github.com/go-logr/logr v1.3.0
+# github.com/go-logr/logr v1.4.1
 ## explicit; go 1.18
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
@@ -485,7 +485,7 @@ github.com/google/s2a-go/stream
 # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 ## explicit; go 1.13
 github.com/google/shlex
-# github.com/google/uuid v1.4.0
+# github.com/google/uuid v1.5.0
 ## explicit
 github.com/google/uuid
 # github.com/googleapis/enterprise-certificate-proxy v0.3.2
@@ -594,7 +594,7 @@ github.com/hashicorp/hcl/json/token
 # github.com/hashicorp/memberlist v0.3.1
 ## explicit; go 1.12
 github.com/hashicorp/memberlist
-# github.com/hetznercloud/hcloud-go v1.52.0
+# github.com/hetznercloud/hcloud-go v1.53.0
 ## explicit; go 1.19
 github.com/hetznercloud/hcloud-go/hcloud
 github.com/hetznercloud/hcloud-go/hcloud/internal/instrumentation
@@ -657,9 +657,9 @@ github.com/mattn/go-isatty
 # github.com/mattn/go-runewidth v0.0.13
 ## explicit; go 1.9
 github.com/mattn/go-runewidth
-# github.com/matttproud/golang_protobuf_extensions v1.0.4
-## explicit; go 1.9
-github.com/matttproud/golang_protobuf_extensions/pbutil
+# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0
+## explicit; go 1.19
+github.com/matttproud/golang_protobuf_extensions/v2/pbutil
 # github.com/miekg/dns v1.1.55
 ## explicit; go 1.19
 github.com/miekg/dns
@@ -768,21 +768,21 @@ github.com/pkg/sftp/internal/encoding/ssh/filexfer
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.17.0
+# github.com/prometheus/client_golang v1.18.0
 ## explicit; go 1.19
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/collectors
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16
-## explicit; go 1.18
+# github.com/prometheus/client_model v0.5.0
+## explicit; go 1.19
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.44.0
-## explicit; go 1.18
+# github.com/prometheus/common v0.45.0
+## explicit; go 1.20
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.11.1
+# github.com/prometheus/procfs v0.12.0
 ## explicit; go 1.19
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
@@ -802,8 +802,9 @@ github.com/sagikazarmark/slog-shim
 # github.com/sahilm/fuzzy v0.1.0
 ## explicit
 github.com/sahilm/fuzzy
-# github.com/scaleway/scaleway-sdk-go v1.0.0-beta.21
+# github.com/scaleway/scaleway-sdk-go v1.0.0-beta.22
 ## explicit; go 1.17
+github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1
 github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1
 github.com/scaleway/scaleway-sdk-go/api/iam/v1alpha1
 github.com/scaleway/scaleway-sdk-go/api/instance/v1
@@ -811,6 +812,7 @@ github.com/scaleway/scaleway-sdk-go/api/ipam/v1alpha1
 github.com/scaleway/scaleway-sdk-go/api/lb/v1
 github.com/scaleway/scaleway-sdk-go/api/marketplace/v1
 github.com/scaleway/scaleway-sdk-go/api/marketplace/v2
+github.com/scaleway/scaleway-sdk-go/api/std
 github.com/scaleway/scaleway-sdk-go/internal/async
 github.com/scaleway/scaleway-sdk-go/internal/auth
 github.com/scaleway/scaleway-sdk-go/internal/errors
@@ -854,7 +856,7 @@ github.com/spf13/cobra/doc
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag
-# github.com/spf13/viper v1.18.0
+# github.com/spf13/viper v1.18.2
 ## explicit; go 1.18
 github.com/spf13/viper
 github.com/spf13/viper/internal/encoding
@@ -865,6 +867,7 @@ github.com/spf13/viper/internal/encoding/javaproperties
 github.com/spf13/viper/internal/encoding/json
 github.com/spf13/viper/internal/encoding/toml
 github.com/spf13/viper/internal/encoding/yaml
+github.com/spf13/viper/internal/features
 # github.com/spotinst/spotinst-sdk-go v1.171.0
 ## explicit; go 1.16
 github.com/spotinst/spotinst-sdk-go/service/elastigroup
@@ -979,7 +982,7 @@ go.starlark.net/syntax
 # go.uber.org/multierr v1.11.0
 ## explicit; go 1.19
 go.uber.org/multierr
-# golang.org/x/crypto v0.16.0
+# golang.org/x/crypto v0.17.0
 ## explicit; go 1.18
 golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blake2b
@@ -1004,7 +1007,7 @@ golang.org/x/crypto/scrypt
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb
+# golang.org/x/exp v0.0.0-20231226003508-02704c960a9b
 ## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
@@ -1020,7 +1023,6 @@ golang.org/x/mod/semver
 # golang.org/x/net v0.19.0
 ## explicit; go 1.18
 golang.org/x/net/bpf
-golang.org/x/net/context
 golang.org/x/net/html
 golang.org/x/net/html/atom
 golang.org/x/net/http/httpguts
@@ -1104,7 +1106,7 @@ golang.org/x/tools/internal/versions
 # gomodules.xyz/jsonpatch/v2 v2.4.0
 ## explicit; go 1.20
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/api v0.153.0
+# google.golang.org/api v0.154.0
 ## explicit; go 1.19
 google.golang.org/api/cloudresourcemanager/v1
 google.golang.org/api/compute/v0.alpha
@@ -1129,7 +1131,7 @@ google.golang.org/api/storage/v1
 google.golang.org/api/tpu/v1
 google.golang.org/api/transport/http
 google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/appengine v1.6.7
+# google.golang.org/appengine v1.6.8
 ## explicit; go 1.11
 google.golang.org/appengine
 google.golang.org/appengine/internal
@@ -1141,15 +1143,15 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17
+# google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.59.0
+# google.golang.org/grpc v1.60.1
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -1186,6 +1188,7 @@ google.golang.org/grpc/internal/metadata
 google.golang.org/grpc/internal/pretty
 google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
@@ -1197,12 +1200,13 @@ google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
+google.golang.org/grpc/resolver/dns
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.31.0
-## explicit; go 1.11
+# google.golang.org/protobuf v1.32.0
+## explicit; go 1.17
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
@@ -1264,7 +1268,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# helm.sh/helm/v3 v3.13.2
+# helm.sh/helm/v3 v3.13.3
 ## explicit; go 1.19
 helm.sh/helm/v3/internal/ignore
 helm.sh/helm/v3/internal/sympath
@@ -1339,7 +1343,7 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.28.3
+# k8s.io/apiextensions-apiserver v0.28.4
 ## explicit; go 1.20
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
@@ -1738,16 +1742,16 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.28.3
-## explicit; go 1.20
+# k8s.io/cloud-provider v0.29.0
+## explicit; go 1.21
 k8s.io/cloud-provider
 k8s.io/cloud-provider/node/helpers
 k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cloud-provider-aws v1.28.3
-## explicit; go 1.20
+# k8s.io/cloud-provider-aws v1.29.1
+## explicit; go 1.21
 k8s.io/cloud-provider-aws/pkg/providers/v1
 # k8s.io/cloud-provider-gcp/providers v0.28.2
 ## explicit; go 1.20
@@ -1771,8 +1775,8 @@ k8s.io/component-base/version
 # k8s.io/component-helpers v0.29.0
 ## explicit; go 1.21
 k8s.io/component-helpers/node/util
-# k8s.io/csi-translation-lib v0.28.3
-## explicit; go 1.20
+# k8s.io/csi-translation-lib v0.29.0
+## explicit; go 1.21
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01
 ## explicit; go 1.13